Major ia64 update -- now boots dom0.
Signed-off-by: dan.magenheimer@hp.com
Signed-off-by: ian.pratt@cl.cam.ac.uk
3f72f1bdJPsV3JCnBqs9ddL9tr6D2g xen/COPYING
3ddb79bcbOVHh38VJzc97-JEGD4dJQ xen/Makefile
3ddb79bcWnTwYsQRWl_PaneJfa6p0w xen/Rules.mk
+421098b25A0RvuYN3rP28ga3_FN3_Q xen/arch/ia64/Makefile
+421098b2okIeYXS9w9avmSozls61xA xen/arch/ia64/Rules.mk
+421098b21p12UcKjHBrLh_LjlvNEwA xen/arch/ia64/acpi.c
+421098b26C_0yoypoHqjDcJA9UrG_g xen/arch/ia64/asm-offsets.c
+421098b2PHgzf_Gg4R65YRNi_QzMKQ xen/arch/ia64/dom0_ops.c
+421098b2O7jsNfzQXA1v3rbAc1QhpA xen/arch/ia64/dom_fw.c
+421098b2ZlaBcyiuuPr3WpzaSDwg6Q xen/arch/ia64/domain.c
+421098b3LYAS8xJkQiGP7tiTlyBt0Q xen/arch/ia64/idle0_task.c
+421098b3ys5GAr4z6_H1jD33oem82g xen/arch/ia64/irq.c
+421098b3Heh72KuoVlND3CH6c0B0aA xen/arch/ia64/lib/Makefile
+421098b3O0MYMUsmYVFy84VV_1gFwQ xen/arch/ia64/mm_init.c
+421098b39QFMC-1t1r38CA7NxAYBPA xen/arch/ia64/patch/linux-2.6.7/bootmem.h
+421098b3SIA1vZX9fFUjo1T3o_jMCQ xen/arch/ia64/patch/linux-2.6.7/current.h
+421098b3ZBl80iPuSeDU_Id5AgZl0w xen/arch/ia64/patch/linux-2.6.7/efi.c
+421098b3VUmGT2Jdy4SWeDTwcCHaqg xen/arch/ia64/patch/linux-2.6.7/efi.h
+421098b3dPmLXyvKEmvH_2XALeIYlg xen/arch/ia64/patch/linux-2.6.7/entry.S
+421098b3eoimqDUiVw9p_RADfvICwQ xen/arch/ia64/patch/linux-2.6.7/gcc_intrin.h
+421098b3ZcvjJahWCTvmpNb1RWArww xen/arch/ia64/patch/linux-2.6.7/hardirq.h
+421098b3gZO0kxetbOVLlpsFkf0PWQ xen/arch/ia64/patch/linux-2.6.7/head.S
+421098b3Hz4y9vxFo6rZ03PXkFF6-w xen/arch/ia64/patch/linux-2.6.7/hpsim_irq.c
+421098b3mn7maohx9UTPjTZEVov-kg xen/arch/ia64/patch/linux-2.6.7/hpsim_ssc.h
+421098b4HWTbzGFd8fAT27GIavt61g xen/arch/ia64/patch/linux-2.6.7/hw_irq.h
+421098b4wVriEglxpLtvD9NMUr76Ew xen/arch/ia64/patch/linux-2.6.7/ide.h
+421098b4ckKw7I-p3APMhFOuefMWMA xen/arch/ia64/patch/linux-2.6.7/init_task.c
+421098b4CSuWMM-4vHvAa4F4luDOLQ xen/arch/ia64/patch/linux-2.6.7/init_task.h
+421098b4x5Hnxgvf22nhvxzPMszw1g xen/arch/ia64/patch/linux-2.6.7/interrupt.h
+421098b4BgHuG3PiGY2QOQCNEqMYsA xen/arch/ia64/patch/linux-2.6.7/io.h
+421098b4JnNHXkW2732slXwxMX79RA xen/arch/ia64/patch/linux-2.6.7/irq.h
+421098b4H-Upf_mxF2apXBffvYadUw xen/arch/ia64/patch/linux-2.6.7/irq_ia64.c
+421098b4C0Lc3xag4Nm-_yC9IMTDqA xen/arch/ia64/patch/linux-2.6.7/ivt.S
+421098b4weyd0AQTjPLmooChUJm13Q xen/arch/ia64/patch/linux-2.6.7/kregs.h
+421098b4vHCejAUPem4w8p5V-AD1Ig xen/arch/ia64/patch/linux-2.6.7/lds.S
+421098b4uooGl5X8zZM96qpmS0Furg xen/arch/ia64/patch/linux-2.6.7/linuxtime.h
+421098b4awnw3Jf23gohJWoK8s7-Qg xen/arch/ia64/patch/linux-2.6.7/minstate.h
+421098b5hIfMbZlQTfrOKN4BtzJgDQ xen/arch/ia64/patch/linux-2.6.7/mm_bootmem.c
+421098b53IVBoQPcDjFciZy86YEhRQ xen/arch/ia64/patch/linux-2.6.7/mm_contig.c
+421098b5pZw41QuBTvhjvSol6aAHDw xen/arch/ia64/patch/linux-2.6.7/mmzone.h
+421098b5B_dClZDGuPYeY3IXo8Hlbw xen/arch/ia64/patch/linux-2.6.7/page.h
+421098b5saClfxPj36l47H9Um7h1Fw xen/arch/ia64/patch/linux-2.6.7/page_alloc.c
+421098b5OkmcjMBq8gxs7ZrTa4Ao6g xen/arch/ia64/patch/linux-2.6.7/processor.h
+421098b51RLB6jWr6rIlpB2SNObxZg xen/arch/ia64/patch/linux-2.6.7/sal.h
+421098b5WFeRnwGtZnHkSvHVzA4blg xen/arch/ia64/patch/linux-2.6.7/setup.c
+421098b5Jm2i8abzb0mpT6mlEiKZDg xen/arch/ia64/patch/linux-2.6.7/slab.c
+421098b5w6MBnluEpQJAWDTBFrbWSQ xen/arch/ia64/patch/linux-2.6.7/slab.h
+421098b5Cg7nbIXm3RhUF-uG3SKaUA xen/arch/ia64/patch/linux-2.6.7/system.h
+421098b5XrkDYW_Nd9lg5CDgNzHLmg xen/arch/ia64/patch/linux-2.6.7/time.c
+421098b5_kFbvZIIPM3bdCES1Ocqnw xen/arch/ia64/patch/linux-2.6.7/tlb.c
+421098b5DWbgK-tBR4um8PEAqPwqTA xen/arch/ia64/patch/linux-2.6.7/types.h
+421098b5il9YfZM0HpeCnaMgVN_q9g xen/arch/ia64/patch/linux-2.6.7/unaligned.c
+421098b65M5cPramsLGbODg8lQwUjQ xen/arch/ia64/patch/linux-2.6.7/wait.h
+421098b6cYDwzXP86ViTLlTO2x7ovA xen/arch/ia64/pdb-stub.c
41a26ebcqaSGVQ8qTMwpPwOJSJ7qSw xen/arch/ia64/privop.c
41a26ebc4BOHDUsT0TSnryPeV2xfRA xen/arch/ia64/process.c
41a26ebcJ30TFl1v2kR8rqpEBvOtVw xen/arch/ia64/regionreg.c
+421098b69pUiIJrqu_w0JMUnZ2uc2A xen/arch/ia64/smp.c
+421098b6_ToSGrf6Pk1Uwg5aMAIBxg xen/arch/ia64/smpboot.c
+421098b6AUdbxR3wyn1ATcmNuTao_Q xen/arch/ia64/tools/README.xenia64
+421098b6rQ2BQ103qu1n1HNofbS2Og xen/arch/ia64/tools/mkbuildtree
41a26ebc--sjlYZQxmIxyCx3jw70qA xen/arch/ia64/vcpu.c
+421098b6M2WhsJ_ZMzFamAQcdc5gzw xen/arch/ia64/vhpt.c
41a26ebc4jSBGQOuyNIPDST58mNbBw xen/arch/ia64/xenasm.S
+421098b6mWyFPtkhPz9h1LCmKpoCLg xen/arch/ia64/xenmisc.c
+421098b6lY2JzrV1oFDbrt7XQhtElg xen/arch/ia64/xensetup.c
3ddb79bcZbRBzT3elFWSX7u6NtMagQ xen/arch/x86/Makefile
3ddb79bcBQF85CfLS4i1WGZ4oLLaCA xen/arch/x86/Rules.mk
3e5636e5FAYZ5_vQnmgwFJfSdmO5Mw xen/arch/x86/acpi.c
40715b2dKRW7A71SNaeV6zfrEzYxPw xen/include/acpi/platform/acenv.h
40715b2d8fYydJMcODFrV1ocLklGDg xen/include/acpi/platform/acgcc.h
40715b2d1yZkqyAt0kgx2xEwsatuuA xen/include/acpi/platform/aclinux.h
+421098b6Y3xqcv873Gvg1rQ5CChfFw xen/include/asm-ia64/config.h
+421098b6ZcIrn_gdqjUtdJyCE0YkZQ xen/include/asm-ia64/debugger.h
+421098b6z0zSuW1rcSJK1gR8RUi-fw xen/include/asm-ia64/dom_fw.h
+421098b6Nn0I7hGB8Mkd1Cis0KMkhA xen/include/asm-ia64/domain.h
+421098b6X3Fs2yht42TE2ufgKqt2Fw xen/include/asm-ia64/ia64_int.h
+421098b7psFAn8kbeR-vcRCdc860Vw xen/include/asm-ia64/init.h
+421098b7XC1A5PhA-lrU9pIO3sSSmA xen/include/asm-ia64/mm.h
+421098b7c0Dx0ABuW_yHQdAqKhUoiQ xen/include/asm-ia64/mmu_context.h
+421098b7C2dr3O7lgc_oeC9TEE9GKw xen/include/asm-ia64/multicall.h
+421098b7dX_56NCV9zjftqm1yIqC8w xen/include/asm-ia64/offsets.h
+421098b72bPUyviWloEAIB85dGCm2Q xen/include/asm-ia64/privop.h
+421098b7Z6OwjZnrTZkh34DoDfcjrA xen/include/asm-ia64/regionreg.h
+421098b707cY5YluUcWK5Pc-71ETVw xen/include/asm-ia64/regs.h
+421098b7czhvyPGFa5nskL0N4vNvFw xen/include/asm-ia64/shadow.h
+421098b7GkWOnlzSmPvNAhByOSZ1Dw xen/include/asm-ia64/time.h
+421098b7FK3xgShpnH0I0Ou3O4fJ2Q xen/include/asm-ia64/tlb.h
+421098b78IGdFOGUlPmpS7h_QBmoFg xen/include/asm-ia64/vcpu.h
+421098b7PiAencgmBFGAqALU-V5rqQ xen/include/asm-ia64/vhpt.h
+421098b7LfwIHQ2lRYWhO4ruEXqIuQ xen/include/asm-ia64/xenserial.h
40715b2dWe0tDhx9LkLXzTQkvD49RA xen/include/asm-x86/acpi.h
3ddb79c3l4IiQtf6MS2jIzcd-hJS8g xen/include/asm-x86/apic.h
3ddb79c3QJYWr8LLGdonLbWmNb9pQQ xen/include/asm-x86/apicdef.h
40e1966azOJZfNI6Ilthe6Q-T3Hewg xen/include/asm-x86/x86_64/string.h
404f1bc4tWkB9Qr8RkKtZGW5eMQzhw xen/include/asm-x86/x86_64/uaccess.h
400304fcmRQmDdFYEzDh0wcBba9alg xen/include/public/COPYING
+421098b7OKb9YH_EUA_UpCxBjaqtgA xen/include/public/arch-ia64.h
404f1bc68SXxmv0zQpXBWGrCzSyp8w xen/include/public/arch-x86_32.h
404f1bc7IwU-qnH8mJeVu0YsNGMrcw xen/include/public/arch-x86_64.h
3ddb79c2PMeWTK86y4C3F4MzHw4A1g xen/include/public/dom0_ops.h
@mv -f $@.new $@
tools/figlet/figlet: tools/figlet/figlet.o
- $(CC) -o $@ $<
+ $(HOSTCC) -o $@ $<
tools/figlet/figlet.o: tools/figlet/figlet.c
- $(CC) -o $@ -c $<
+ $(HOSTCC) -o $@ -c $<
include/xen/banner.h: tools/figlet/figlet tools/figlet/xen.flf
tools/figlet/figlet -d tools/figlet Xen $(XEN_VERSION).$(XEN_SUBVERSION)$(XEN_EXTRAVERSION) > $@.new
--- /dev/null
+include $(BASEDIR)/Rules.mk
+
+# libs-y += arch/ia64/lib/lib.a
+
+OBJS = xensetup.o setup.o time.o irq.o ia64_ksyms.o process.o smp.o \
+ xenmisc.o pdb-stub.o acpi.o \
+ machvec.o dom0_ops.o domain.o \
+ idle0_task.o pal.o hpsim.o efi.o efi_stub.o ivt.o mm_contig.o \
+ mm_bootmem.o sal.o cmdline.o mm_init.o tlb.o page_alloc.o slab.o \
+ regionreg.o entry.o unaligned.o privop.o vcpu.o \
+ irq_ia64.o irq_lsapic.o hpsim_irq.o vhpt.o xenasm.o dom_fw.o
+# perfmon.o
+# unwind.o needed for kernel unwinding (rare)
+
+OBJS := $(subst $(TARGET_ARCH)/asm-offsets.o,,$(OBJS))
+
+# remove following line if not privifying in memory
+# OBJS += privify.o
+
+# What happens here? We link monitor object files together, starting
+# at MONITOR_BASE (a very high address). But bootloader cannot put
+# things there, so we initially load at LOAD_BASE. A hacky little
+# tool called `elf-reloc' is used to modify segment offsets from
+# MONITOR_BASE-relative to LOAD_BASE-relative.
+# (NB. Linux gets round this by turning its image into raw binary, then
+# wrapping that with a low-memory bootstrapper.)
+default: $(OBJS) head.o ia64lib.o xen.lds.s
+ $(LD) -r -o arch.o $(OBJS) ia64lib.o
+ $(LD) $(LDFLAGS) -T $(BASEDIR)/arch/$(TARGET_ARCH)/xen.lds.s -N \
+ -Map map.out head.o $(ALL_OBJS) -o $(TARGET)-syms
+ $(OBJCOPY) -R .note -R .comment -S $(TARGET)-syms $(TARGET)
+# $(BASEDIR)/tools/elf-reloc $(MONITOR_BASE) $(LOAD_BASE) $(TARGET)
+
+asm-offsets.s: asm-offsets.c
+ $(CC) $(CFLAGS) -S -o $@ $<
+
+# I'm sure a Makefile wizard would know a better way to do this
+xen.lds.s: xen.lds.S
+ $(CC) -E $(CPPFLAGS) -P -DXEN -D__ASSEMBLY__ \
+ -o xen.lds.s xen.lds.S
+
+ia64lib.o:
+ $(MAKE) -C lib && cp lib/ia64lib.o .
+
+clean:
+ rm -f *.o *~ core xen.lds.s
+ $(MAKE) -C lib clean
+
+# setup.o contains bits of compile.h so it must be blown away
+delete-unfresh-files:
+ echo any unfresh-files to delete for ia64\?
+# rm -f setup.o
+
+.PHONY: default clean delete-unfresh-files
+
--- /dev/null
+########################################
+# ia64-specific definitions
+
+ifeq ($(COMPILE_ARCH),$(TARGET_ARCH))
+OBJCOPY = objcopy
+endif
+ifneq ($(COMPILE_ARCH),$(TARGET_ARCH))
+CC = /usr/local/sp_env/v2.2/i686/bin/ia64-unknown-linux-gcc
+LD = /usr/local/sp_env/v2.2/i686/bin/ia64-unknown-linux-ld
+OBJCOPY = /usr/local/sp_env/v2.2/i686/bin/ia64-unknown-linux-objcopy
+endif
+HOSTCC := gcc
+#LD := ld
+# Linker should relocate monitor to this address
+MONITOR_BASE := 0xFC500000
+# Bootloader should load monitor to this real address
+LOAD_BASE := 0x00100000
+AFLAGS += -D__ASSEMBLY__
+CPPFLAGS += -I$(BASEDIR)/include -I$(BASEDIR)/include/asm-ia64
+CFLAGS := -nostdinc -fno-builtin -fno-common -fno-strict-aliasing
+#CFLAGS += -O3 # -O3 over-inlines making debugging tough!
+CFLAGS += -O2 # but no optimization causes compile errors!
+CFLAGS += -iwithprefix include -Wall -DMONITOR_BASE=$(MONITOR_BASE)
+CFLAGS += -fomit-frame-pointer -I$(BASEDIR)/include -D__KERNEL__
+CFLAGS += -I$(BASEDIR)/include/asm-ia64
+CFLAGS += -Wno-pointer-arith -Wredundant-decls
+CFLAGS += -DIA64 -DXEN -DLINUX_2_6
+CFLAGS += -ffixed-r13 -mfixed-range=f12-f15,f32-f127
+CFLAGS += -w -g
+#TARGET_CPU := i686
+#CFLAGS += -march=$(TARGET_CPU)
+#LDARCHFLAGS := --oformat elf32-i386
+LDFLAGS := -g
--- /dev/null
+/*
+ * acpi.c - Architecture-Specific Low-Level ACPI Support
+ *
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
+ * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <xen/config.h>
+#include <xen/kernel.h>
+#include <xen/init.h>
+#include <xen/types.h>
+/*#include <xen/stddef.h>*/
+#include <xen/slab.h>
+#include <xen/pci.h>
+/*#include <xen/bootmem.h>*/
+#include <xen/irq.h>
+#include <xen/acpi.h>
+//#include <asm/mpspec.h>
+#include <asm/io.h>
+//#include <asm/apic.h>
+//#include <asm/apicdef.h>
+#include <asm/page.h>
+/*#include <asm/pgtable.h>*/
+#include <asm/pgalloc.h>
+//#include <asm/io_apic.h>
+#include <asm/acpi.h>
+/*#include <asm/save_state.h>*/
+//#include <asm/smpboot.h>
+
+
+#define PREFIX "ACPI: "
+
+int acpi_lapic = 0;
+int acpi_ioapic = 0;
+
+/* --------------------------------------------------------------------------
+ Boot-time Configuration
+ -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_ACPI_BOOT
+int acpi_noirq __initdata = 0; /* skip ACPI IRQ initialization */
+int acpi_ht __initdata = 1; /* enable HT */
+
+enum acpi_irq_model_id acpi_irq_model;
+
+
+/*
+ * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
+ * to map the target physical address. The problem is that set_fixmap()
+ * provides a single page, and it is possible that the page is not
+ * sufficient.
+ * By using this area, we can map up to MAX_IO_APICS pages temporarily,
+ * i.e. until the next __va_range() call.
+ *
+ * Important Safety Note: The fixed I/O APIC page numbers are *subtracted*
+ * from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and
+ * count idx down while incrementing the phys address.
+ */
+/*
+ * Map the ACPI table at physical address @phys, @size bytes, and return
+ * a virtual pointer to it.  Tables below 8MB are reached through the
+ * direct mapping (__va); higher addresses go through the FIX_ACPI_*
+ * fixmap slots on Linux.
+ *
+ * BUG FIX: when XEN is defined the fixmap branch is compiled out and
+ * `base' was returned uninitialized (undefined behavior).  Xen/ia64 can
+ * use the identity mapping for the high-address case as well, so
+ * compute base with __va() there.
+ */
+char *__acpi_map_table(unsigned long phys, unsigned long size)
+{
+	unsigned long base, offset, mapped_size;
+	int idx;
+
+	if (phys + size < 8*1024*1024)
+		return __va(phys);
+
+	offset = phys & (PAGE_SIZE - 1);
+	mapped_size = PAGE_SIZE - offset;
+#ifndef XEN
+// where is FIX_ACPI_*? hack for now, FIXME later
+	set_fixmap(FIX_ACPI_END, phys);
+	base = fix_to_virt(FIX_ACPI_END);
+
+	/*
+	 * Most cases can be covered by the below.
+	 */
+	idx = FIX_ACPI_END;
+	while (mapped_size < size) {
+		if (--idx < FIX_ACPI_BEGIN)
+			return 0;	/* cannot handle this */
+		phys += PAGE_SIZE;
+		set_fixmap(idx, phys);
+		mapped_size += PAGE_SIZE;
+	}
+#else
+	/* No fixmaps under Xen: rely on the direct (identity) mapping.
+	 * phys - offset is the page base; offset is added back below. */
+	base = (unsigned long) __va(phys - offset);
+#endif
+
+	return ((unsigned char *) base + offset);
+}
+
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
+
+
+/*
+ * Parse the MADT header: record the 32-bit local APIC address advertised
+ * by the table (it may later be overridden by a 64-bit LAPIC_ADDR_OVR
+ * entry) and probe for clustered-APIC platforms via the OEM ids.
+ * Returns 0 on success, -EINVAL for bad arguments, -ENODEV if the table
+ * cannot be mapped.
+ */
+static int __init
+acpi_parse_madt (
+	unsigned long		phys_addr,
+	unsigned long		size)
+{
+	struct acpi_table_madt	*madt = NULL;
+
+	if (!phys_addr || !size)
+		return -EINVAL;
+
+	madt = (struct acpi_table_madt *) __acpi_map_table(phys_addr, size);
+	if (!madt) {
+		printk(KERN_WARNING PREFIX "Unable to map MADT\n");
+		return -ENODEV;
+	}
+
+	/* Zero means "architectural default": keep APIC_DEFAULT_PHYS_BASE. */
+	if (madt->lapic_address)
+		acpi_lapic_addr = (u64) madt->lapic_address;
+
+	printk(KERN_INFO PREFIX "Local APIC address 0x%08x\n",
+		madt->lapic_address);
+
+	detect_clustered_apic(madt->header.oem_id, madt->header.oem_table_id);
+
+	return 0;
+}
+
+
+/*
+ * MADT LAPIC entry handler: register one local APIC (id + enabled flag)
+ * with the MP-table layer.  Returns 0, or -EINVAL on a NULL header.
+ */
+static int __init
+acpi_parse_lapic (
+	acpi_table_entry_header *header)
+{
+	struct acpi_table_lapic	*processor = NULL;
+
+	processor = (struct acpi_table_lapic*) header;
+	if (!processor)
+		return -EINVAL;
+
+	acpi_table_print_madt_entry(header);
+
+	mp_register_lapic (
+		processor->id,		/* APIC ID */
+		processor->flags.enabled);	/* Enabled? */
+
+	return 0;
+}
+
+
+/*
+ * MADT LAPIC address-override handler: replace the 32-bit LAPIC address
+ * taken from the MADT header with the 64-bit value from this entry.
+ * Returns 0, or -EINVAL on a NULL header.
+ */
+static int __init
+acpi_parse_lapic_addr_ovr (
+	acpi_table_entry_header *header)
+{
+	struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
+
+	lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr*) header;
+	if (!lapic_addr_ovr)
+		return -EINVAL;
+
+	acpi_lapic_addr = lapic_addr_ovr->address;
+
+	return 0;
+}
+
+/*
+ * MADT LAPIC NMI entry handler.  Only logs the entry; warns if the NMI
+ * is wired to a LINT pin other than LINT1 (the conventional NMI input).
+ * Returns 0, or -EINVAL on a NULL header.
+ */
+static int __init
+acpi_parse_lapic_nmi (
+	acpi_table_entry_header *header)
+{
+	struct acpi_table_lapic_nmi *lapic_nmi = NULL;
+
+	lapic_nmi = (struct acpi_table_lapic_nmi*) header;
+	if (!lapic_nmi)
+		return -EINVAL;
+
+	acpi_table_print_madt_entry(header);
+
+	if (lapic_nmi->lint != 1)
+		printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
+
+	return 0;
+}
+
+#endif /*CONFIG_X86_LOCAL_APIC*/
+
+#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)
+
+/*
+ * MADT I/O APIC entry handler: register one I/O APIC (id, MMIO address,
+ * global IRQ base) with the MP-table layer.  Returns 0, or -EINVAL on a
+ * NULL header.
+ */
+static int __init
+acpi_parse_ioapic (
+	acpi_table_entry_header *header)
+{
+	struct acpi_table_ioapic *ioapic = NULL;
+
+	ioapic = (struct acpi_table_ioapic*) header;
+	if (!ioapic)
+		return -EINVAL;
+
+	acpi_table_print_madt_entry(header);
+
+	mp_register_ioapic (
+		ioapic->id,
+		ioapic->address,
+		ioapic->global_irq_base);
+
+	return 0;
+}
+
+
+/*
+ * MADT interrupt source override handler: remap a legacy (ISA) bus IRQ
+ * to its global system interrupt, with the polarity/trigger flags from
+ * the entry.  Returns 0, or -EINVAL on a NULL header.
+ */
+static int __init
+acpi_parse_int_src_ovr (
+	acpi_table_entry_header *header)
+{
+	struct acpi_table_int_src_ovr *intsrc = NULL;
+
+	intsrc = (struct acpi_table_int_src_ovr*) header;
+	if (!intsrc)
+		return -EINVAL;
+
+	acpi_table_print_madt_entry(header);
+
+	mp_override_legacy_irq (
+		intsrc->bus_irq,
+		intsrc->flags.polarity,
+		intsrc->flags.trigger,
+		intsrc->global_irq);
+
+	return 0;
+}
+
+
+/*
+ * MADT NMI source entry handler.  Currently only prints the entry; no
+ * registration is performed yet (see TBD below).  Returns 0, or -EINVAL
+ * on a NULL header.
+ */
+static int __init
+acpi_parse_nmi_src (
+	acpi_table_entry_header *header)
+{
+	struct acpi_table_nmi_src *nmi_src = NULL;
+
+	nmi_src = (struct acpi_table_nmi_src*) header;
+	if (!nmi_src)
+		return -EINVAL;
+
+	acpi_table_print_madt_entry(header);
+
+	/* TBD: Support nmi_src entries? */
+
+	return 0;
+}
+
+#endif /*CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER*/
+
+
+/*
+ * Scan [start, start+length) for the ACPI RSDP signature ("RSD PTR ",
+ * without the trailing NUL) on every 16-byte boundary, as the ACPI spec
+ * requires.  Returns the physical address of the match, or 0 if none.
+ *
+ * NOTE(review): `start' is a physical address used directly as a
+ * pointer -- this assumes the low 1MB is identity-mapped here; confirm
+ * for the Xen/ia64 boot environment.
+ */
+static unsigned long __init
+acpi_scan_rsdp (
+	unsigned long		start,
+	unsigned long		length)
+{
+	unsigned long		offset = 0;
+	unsigned long		sig_len = sizeof("RSD PTR ") - 1;
+
+	/*
+	 * Scan all 16-byte boundaries of the physical memory region for the
+	 * RSDP signature.
+	 */
+	for (offset = 0; offset < length; offset += 16) {
+		if (strncmp((char *) (start + offset), "RSD PTR ", sig_len))
+			continue;
+		return (start + offset);
+	}
+
+	return 0;
+}
+
+
+/*
+ * Locate the ACPI RSDP.  Per the ACPI specification it lives either in
+ * the first KB of the EBDA or in the BIOS read-only area E0000h-FFFFFh.
+ * Returns its physical address, or 0 if not found.
+ *
+ * BUG FIX: the second scan passed 0xFFFFF as the *length*, which walked
+ * roughly 1MB past the top of the BIOS area (to 0x1DFFEF).  The
+ * E0000h-FFFFFh window is 0x20000 bytes long.
+ */
+unsigned long __init
+acpi_find_rsdp (void)
+{
+	unsigned long		rsdp_phys = 0;
+
+	/*
+	 * Scan memory looking for the RSDP signature. First search EBDA (low
+	 * memory) paragraphs and then search upper memory (E0000-FFFFF).
+	 */
+	rsdp_phys = acpi_scan_rsdp (0, 0x400);
+	if (!rsdp_phys)
+		rsdp_phys = acpi_scan_rsdp (0xE0000, 0x20000);
+
+	return rsdp_phys;
+}
+
+
+/*
+ * acpi_boot_init()
+ * called from setup_arch(), always.
+ * 1. maps ACPI tables for later use
+ * 2. enumerates lapics
+ * 3. enumerates io-apics
+ *
+ * side effects:
+ * acpi_lapic = 1 if LAPIC found
+ * acpi_ioapic = 1 if IOAPIC found
+ * if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
+ * if acpi_blacklisted() acpi_disabled = 1;
+ * acpi_irq_model=...
+ * ...
+ *
+ * return value: (currently ignored)
+ * 0: success
+ * !0: failure
+ */
+int __init
+acpi_boot_init (void)
+{
+ int result = 0;
+
+ if (acpi_disabled && !acpi_ht)
+ return(1);
+
+ /*
+ * The default interrupt routing model is PIC (8259). This gets
+ * overridden if IOAPICs are enumerated (below).
+ */
+ acpi_irq_model = ACPI_IRQ_MODEL_PIC;
+
+ /*
+ * Initialize the ACPI boot-time table parser.
+ */
+ result = acpi_table_init();
+ if (result) {
+#ifndef XEN
+// hack for now, FIXME later
+ acpi_disabled = 1;
+#endif
+ return result;
+ }
+
+ result = acpi_blacklisted();
+ if (result) {
+ printk(KERN_NOTICE PREFIX "BIOS listed in blacklist, disabling ACPI support\n");
+#ifndef XEN
+// hack for now, FIXME later
+ acpi_disabled = 1;
+#endif
+ return result;
+ }
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+ /*
+ * MADT
+ * ----
+ * Parse the Multiple APIC Description Table (MADT), if exists.
+ * Note that this table provides platform SMP configuration
+ * information -- the successor to MPS tables.
+ */
+
+ result = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
+ if (!result) {
+ return 0;
+ }
+ else if (result < 0) {
+ printk(KERN_ERR PREFIX "Error parsing MADT\n");
+ return result;
+ }
+ else if (result > 1)
+ printk(KERN_WARNING PREFIX "Multiple MADT tables exist\n");
+
+ /*
+ * Local APIC
+ * ----------
+ * Note that the LAPIC address is obtained from the MADT (32-bit value)
+ * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
+ */
+
+ result = acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr);
+ if (result < 0) {
+ printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
+ return result;
+ }
+
+ mp_register_lapic_address(acpi_lapic_addr);
+
+ result = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic);
+ if (!result) {
+ printk(KERN_ERR PREFIX "No LAPIC entries present\n");
+ /* TBD: Cleanup to allow fallback to MPS */
+ return -ENODEV;
+ }
+ else if (result < 0) {
+ printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
+ /* TBD: Cleanup to allow fallback to MPS */
+ return result;
+ }
+
+ result = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi);
+ if (result < 0) {
+ printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
+ /* TBD: Cleanup to allow fallback to MPS */
+ return result;
+ }
+
+ acpi_lapic = 1;
+
+#endif /*CONFIG_X86_LOCAL_APIC*/
+
+#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)
+
+ /*
+ * I/O APIC
+ * --------
+ */
+
+ /*
+ * ACPI interpreter is required to complete interrupt setup,
+ * so if it is off, don't enumerate the io-apics with ACPI.
+ * If MPS is present, it will handle them,
+ * otherwise the system will stay in PIC mode
+ */
+ if (acpi_disabled || acpi_noirq) {
+ return 1;
+ }
+
+ /*
+ * if "noapic" boot option, don't look for IO-APICs
+ */
+ if (ioapic_setup_disabled()) {
+ printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
+ "due to 'noapic' option.\n");
+ return 1;
+ }
+
+
+ result = acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic);
+ if (!result) {
+ printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
+ return -ENODEV;
+ }
+ else if (result < 0) {
+ printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
+ return result;
+ }
+
+ /* Build a default routing table for legacy (ISA) interrupts. */
+ mp_config_acpi_legacy_irqs();
+
+ result = acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr);
+ if (result < 0) {
+ printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
+ /* TBD: Cleanup to allow fallback to MPS */
+ return result;
+ }
+
+ result = acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src);
+ if (result < 0) {
+ printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
+ /* TBD: Cleanup to allow fallback to MPS */
+ return result;
+ }
+
+ acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
+
+ acpi_irq_balance_set(NULL);
+
+ acpi_ioapic = 1;
+
+ if (acpi_lapic && acpi_ioapic)
+ smp_found_config = 1;
+
+#endif /*CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER*/
+
+ return 0;
+}
+
+#endif /*CONFIG_ACPI_BOOT*/
+
+#ifdef CONFIG_ACPI_BUS
+/*
+ * "acpi_pic_sci=level" (current default)
+ * programs the PIC-mode SCI to Level Trigger.
+ * (NO-OP if the BIOS set Level Trigger already)
+ *
+ * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
+ * it may require Edge Trigger -- use "acpi_pic_sci=edge"
+ * (NO-OP if the BIOS set Edge Trigger already)
+ *
+ * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers
+ * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge.
+ * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0)
+ * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0)
+ */
+
+static __initdata int acpi_pic_sci_trigger; /* 0: level, 1: edge */
+
+/*
+ * Program the PIC-mode SCI trigger via the ELCR (ports 0x4d0/0x4d1).
+ * In the ELCR, bit[n]==1 means irq n is level-triggered, 0 means edge.
+ * acpi_pic_sci_trigger selects the desired mode (0: level, 1: edge);
+ * the register is only rewritten when the current mode differs.
+ *
+ * BUG FIX: the "set to Edge" branch wrote (val | mask), which *sets*
+ * the level bit and leaves the line level-triggered.  Selecting edge
+ * requires clearing the bit: (val & ~mask).
+ */
+void __init
+acpi_pic_sci_set_trigger(unsigned int irq)
+{
+	unsigned char mask = 1 << (irq & 7);
+	unsigned int port = 0x4d0 + (irq >> 3);
+	unsigned char val = inb(port);
+
+	printk(PREFIX "IRQ%d SCI:", irq);
+	if (!(val & mask)) {
+		printk(" Edge");
+
+		if (!acpi_pic_sci_trigger) {
+			printk(" set to Level");
+			outb(val | mask, port);
+		}
+	} else {
+		printk(" Level");
+
+		if (acpi_pic_sci_trigger) {
+			printk(" set to Edge");
+			outb(val & ~mask, port);
+		}
+	}
+	printk(" Trigger.\n");
+}
+
+/*
+ * Parse the "acpi_pic_sci=" boot option: "level" forces level trigger
+ * (the default), "edge" forces edge trigger.  Multiple tokens separated
+ * by commas (plus spaces/tabs) are accepted; the last match wins.
+ * Always returns 1 to mark the option as consumed.
+ */
+int __init
+acpi_pic_sci_setup(char *str)
+{
+	while (str && *str) {
+		if (strncmp(str, "level", 5) == 0)
+			acpi_pic_sci_trigger = 0;	/* force level trigger */
+		if (strncmp(str, "edge", 4) == 0)
+			acpi_pic_sci_trigger = 1;	/* force edge trigger */
+		/* advance past the next ',' and any separator run */
+		str = strchr(str, ',');
+		if (str)
+			str += strspn(str, ", \t");
+	}
+	return 1;
+}
+
+__setup("acpi_pic_sci=", acpi_pic_sci_setup);
+
+#endif /* CONFIG_ACPI_BUS */
+
+
+
+/* --------------------------------------------------------------------------
+ Low-Level Sleep Support
+ -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_ACPI_SLEEP
+
+#define DEBUG
+
+#ifdef DEBUG
+#include <xen/serial.h>
+#endif
+
+/* address in low memory of the wakeup routine. */
+unsigned long acpi_wakeup_address = 0;
+
+/* new page directory that we will be using */
+static pmd_t *pmd;
+
+/* saved page directory */
+static pmd_t saved_pmd;
+
+/* page which we'll use for the new page directory */
+static pte_t *ptep;
+
+extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
+
+/*
+ * acpi_create_identity_pmd
+ *
+ * Create a new, identity mapped pmd.
+ *
+ * Do this by creating new page directory, and marking all the pages as R/W
+ * Then set it as the new Page Middle Directory.
+ * And, of course, flush the TLB so it takes effect.
+ *
+ * We save the address of the old one, for later restoration.
+ */
+static void acpi_create_identity_pmd (void)
+{
+	pgd_t *pgd;
+	int i;
+
+	/* NOTE(review): __get_free_page() result is not checked; a NULL
+	 * here would fault in the set_pte loop below -- confirm OOM
+	 * cannot occur at this point of suspend. */
+	ptep = (pte_t*)__get_free_page(GFP_KERNEL);
+
+	/* fill page with low mapping: pte i -> physical page i (R/W) */
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		set_pte(ptep + i, mk_pte_phys(i << PAGE_SHIFT, PAGE_SHARED));
+
+	pgd = pgd_offset(current->active_mm, 0);
+	pmd = pmd_alloc(current->mm,pgd, 0);
+
+	/* save the old pmd (restored by acpi_restore_pmd) */
+	saved_pmd = *pmd;
+
+	/* set the new one */
+	set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(ptep)));
+
+	/* flush the TLB */
+	local_flush_tlb();
+}
+
+/*
+ * acpi_restore_pmd
+ *
+ * Restore the old pmd saved by acpi_create_identity_pmd and
+ * free the page that said function alloc'd
+ */
+static void acpi_restore_pmd (void)
+{
+	set_pmd(pmd, saved_pmd);
+	/* flush stale identity-map translations */
+	local_flush_tlb();
+	free_page((unsigned long)ptep);
+}
+
+/**
+ * acpi_save_state_mem - save kernel state
+ *
+ * Create an identity mapped page table and copy the wakeup routine to
+ * low memory (at acpi_wakeup_address, reserved by acpi_reserve_bootmem).
+ *
+ * Always returns 0 (success).
+ */
+int acpi_save_state_mem (void)
+{
+	acpi_create_identity_pmd();
+	acpi_copy_wakeup_routine(acpi_wakeup_address);
+
+	return 0;
+}
+
+/**
+ * acpi_save_state_disk - save kernel state to disk
+ *
+ * Not implemented: returns 1 (failure) unconditionally.
+ */
+int acpi_save_state_disk (void)
+{
+	return 1;
+}
+
+/*
+ * acpi_restore_state - undo acpi_save_state_mem by restoring the
+ * original pmd saved before suspend.
+ */
+void acpi_restore_state_mem (void)
+{
+	acpi_restore_pmd();
+}
+
+/**
+ * acpi_reserve_bootmem - do _very_ early ACPI initialisation
+ *
+ * We allocate a page in low memory for the wakeup
+ * routine for when we come back from a sleep state. The
+ * runtime allocator allows specification of <16M pages, but not
+ * <1M pages.
+ */
+void __init acpi_reserve_bootmem(void)
+{
+	/* one low page; the wakeup routine is copied here later by
+	 * acpi_save_state_mem() */
+	acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE);
+	printk(KERN_DEBUG "ACPI: have wakeup address 0x%8.8lx\n", acpi_wakeup_address);
+}
+
+/*
+ * Enter/resume S4 via the BIOS (S4bios).  With resume==0: save the
+ * processor context, record the wakeup point (the acpi_sleep_done label
+ * below, taken with GCC's &&label extension) and enter S4.  On wakeup,
+ * or when called with resume!=0, control reaches acpi_sleep_done and
+ * the processor context is restored.
+ */
+void do_suspend_lowlevel_s4bios(int resume)
+{
+	if (!resume) {
+		save_processor_context();
+		acpi_save_register_state((unsigned long)&&acpi_sleep_done);
+		acpi_enter_sleep_state_s4bios();
+		return;
+	}
+acpi_sleep_done:
+	restore_processor_context();
+}
+
+
+#endif /*CONFIG_ACPI_SLEEP*/
+
--- /dev/null
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed
+ * to extract and format the required data.
+ */
+
+#include <xen/config.h>
+#include <xen/sched.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+
+#define task_struct exec_domain
+
+#define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+#define OFFSET(_sym, _str, _mem) \
+ DEFINE(_sym, offsetof(_str, _mem));
+
+void foo(void)
+{
+ DEFINE(IA64_TASK_SIZE, sizeof (struct task_struct));
+ DEFINE(IA64_THREAD_INFO_SIZE, sizeof (struct thread_info));
+ DEFINE(IA64_PT_REGS_SIZE, sizeof (struct pt_regs));
+ DEFINE(IA64_SWITCH_STACK_SIZE, sizeof (struct switch_stack));
+ //DEFINE(IA64_SIGINFO_SIZE, sizeof (struct siginfo));
+ DEFINE(IA64_CPU_SIZE, sizeof (struct cpuinfo_ia64));
+ //DEFINE(SIGFRAME_SIZE, sizeof (struct sigframe));
+ DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info));
+
+ BLANK();
+
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
+
+ BLANK();
+
+ //DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
+ //DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
+ //DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
+ //DEFINE(IA64_TASK_PENDING_OFFSET,offsetof (struct task_struct, pending));
+ //DEFINE(IA64_TASK_PID_OFFSET, offsetof (struct task_struct, pid));
+ //DEFINE(IA64_TASK_REAL_PARENT_OFFSET, offsetof (struct task_struct, real_parent));
+ //DEFINE(IA64_TASK_SIGHAND_OFFSET,offsetof (struct task_struct, sighand));
+ //DEFINE(IA64_TASK_SIGNAL_OFFSET,offsetof (struct task_struct, signal));
+ //DEFINE(IA64_TASK_TGID_OFFSET, offsetof (struct task_struct, tgid));
+ DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct task_struct, thread.ksp));
+ DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct task_struct, thread.on_ustack));
+
+ BLANK();
+
+ //DEFINE(IA64_SIGHAND_SIGLOCK_OFFSET,offsetof (struct sighand_struct, siglock));
+
+ BLANK();
+
+ //DEFINE(IA64_SIGNAL_GROUP_STOP_COUNT_OFFSET,offsetof (struct signal_struct,
+ //group_stop_count));
+ //DEFINE(IA64_SIGNAL_SHARED_PENDING_OFFSET,offsetof (struct signal_struct, shared_pending));
+
+ BLANK();
+
+ DEFINE(IA64_PT_REGS_B6_OFFSET, offsetof (struct pt_regs, b6));
+ DEFINE(IA64_PT_REGS_B7_OFFSET, offsetof (struct pt_regs, b7));
+ DEFINE(IA64_PT_REGS_AR_CSD_OFFSET, offsetof (struct pt_regs, ar_csd));
+ DEFINE(IA64_PT_REGS_AR_SSD_OFFSET, offsetof (struct pt_regs, ar_ssd));
+ DEFINE(IA64_PT_REGS_R8_OFFSET, offsetof (struct pt_regs, r8));
+ DEFINE(IA64_PT_REGS_R9_OFFSET, offsetof (struct pt_regs, r9));
+ DEFINE(IA64_PT_REGS_R10_OFFSET, offsetof (struct pt_regs, r10));
+ DEFINE(IA64_PT_REGS_R11_OFFSET, offsetof (struct pt_regs, r11));
+ DEFINE(IA64_PT_REGS_CR_IPSR_OFFSET, offsetof (struct pt_regs, cr_ipsr));
+ DEFINE(IA64_PT_REGS_CR_IIP_OFFSET, offsetof (struct pt_regs, cr_iip));
+ DEFINE(IA64_PT_REGS_CR_IFS_OFFSET, offsetof (struct pt_regs, cr_ifs));
+ DEFINE(IA64_PT_REGS_AR_UNAT_OFFSET, offsetof (struct pt_regs, ar_unat));
+ DEFINE(IA64_PT_REGS_AR_PFS_OFFSET, offsetof (struct pt_regs, ar_pfs));
+ DEFINE(IA64_PT_REGS_AR_RSC_OFFSET, offsetof (struct pt_regs, ar_rsc));
+ DEFINE(IA64_PT_REGS_AR_RNAT_OFFSET, offsetof (struct pt_regs, ar_rnat));
+
+ DEFINE(IA64_PT_REGS_AR_BSPSTORE_OFFSET, offsetof (struct pt_regs, ar_bspstore));
+ DEFINE(IA64_PT_REGS_PR_OFFSET, offsetof (struct pt_regs, pr));
+ DEFINE(IA64_PT_REGS_B0_OFFSET, offsetof (struct pt_regs, b0));
+ DEFINE(IA64_PT_REGS_LOADRS_OFFSET, offsetof (struct pt_regs, loadrs));
+ DEFINE(IA64_PT_REGS_R1_OFFSET, offsetof (struct pt_regs, r1));
+ DEFINE(IA64_PT_REGS_R12_OFFSET, offsetof (struct pt_regs, r12));
+ DEFINE(IA64_PT_REGS_R13_OFFSET, offsetof (struct pt_regs, r13));
+ DEFINE(IA64_PT_REGS_AR_FPSR_OFFSET, offsetof (struct pt_regs, ar_fpsr));
+ DEFINE(IA64_PT_REGS_R15_OFFSET, offsetof (struct pt_regs, r15));
+ DEFINE(IA64_PT_REGS_R14_OFFSET, offsetof (struct pt_regs, r14));
+ DEFINE(IA64_PT_REGS_R2_OFFSET, offsetof (struct pt_regs, r2));
+ DEFINE(IA64_PT_REGS_R3_OFFSET, offsetof (struct pt_regs, r3));
+ DEFINE(IA64_PT_REGS_R16_OFFSET, offsetof (struct pt_regs, r16));
+ DEFINE(IA64_PT_REGS_R17_OFFSET, offsetof (struct pt_regs, r17));
+ DEFINE(IA64_PT_REGS_R18_OFFSET, offsetof (struct pt_regs, r18));
+ DEFINE(IA64_PT_REGS_R19_OFFSET, offsetof (struct pt_regs, r19));
+ DEFINE(IA64_PT_REGS_R20_OFFSET, offsetof (struct pt_regs, r20));
+ DEFINE(IA64_PT_REGS_R21_OFFSET, offsetof (struct pt_regs, r21));
+ DEFINE(IA64_PT_REGS_R22_OFFSET, offsetof (struct pt_regs, r22));
+ DEFINE(IA64_PT_REGS_R23_OFFSET, offsetof (struct pt_regs, r23));
+ DEFINE(IA64_PT_REGS_R24_OFFSET, offsetof (struct pt_regs, r24));
+ DEFINE(IA64_PT_REGS_R25_OFFSET, offsetof (struct pt_regs, r25));
+ DEFINE(IA64_PT_REGS_R26_OFFSET, offsetof (struct pt_regs, r26));
+ DEFINE(IA64_PT_REGS_R27_OFFSET, offsetof (struct pt_regs, r27));
+ DEFINE(IA64_PT_REGS_R28_OFFSET, offsetof (struct pt_regs, r28));
+ DEFINE(IA64_PT_REGS_R29_OFFSET, offsetof (struct pt_regs, r29));
+ DEFINE(IA64_PT_REGS_R30_OFFSET, offsetof (struct pt_regs, r30));
+ DEFINE(IA64_PT_REGS_R31_OFFSET, offsetof (struct pt_regs, r31));
+ DEFINE(IA64_PT_REGS_AR_CCV_OFFSET, offsetof (struct pt_regs, ar_ccv));
+ DEFINE(IA64_PT_REGS_F6_OFFSET, offsetof (struct pt_regs, f6));
+ DEFINE(IA64_PT_REGS_F7_OFFSET, offsetof (struct pt_regs, f7));
+ DEFINE(IA64_PT_REGS_F8_OFFSET, offsetof (struct pt_regs, f8));
+ DEFINE(IA64_PT_REGS_F9_OFFSET, offsetof (struct pt_regs, f9));
+ DEFINE(IA64_PT_REGS_F10_OFFSET, offsetof (struct pt_regs, f10));
+ DEFINE(IA64_PT_REGS_F11_OFFSET, offsetof (struct pt_regs, f11));
+
+ BLANK();
+
+ DEFINE(IA64_SWITCH_STACK_CALLER_UNAT_OFFSET, offsetof (struct switch_stack, caller_unat));
+ DEFINE(IA64_SWITCH_STACK_AR_FPSR_OFFSET, offsetof (struct switch_stack, ar_fpsr));
+ DEFINE(IA64_SWITCH_STACK_F2_OFFSET, offsetof (struct switch_stack, f2));
+ DEFINE(IA64_SWITCH_STACK_F3_OFFSET, offsetof (struct switch_stack, f3));
+ DEFINE(IA64_SWITCH_STACK_F4_OFFSET, offsetof (struct switch_stack, f4));
+ DEFINE(IA64_SWITCH_STACK_F5_OFFSET, offsetof (struct switch_stack, f5));
+ DEFINE(IA64_SWITCH_STACK_F12_OFFSET, offsetof (struct switch_stack, f12));
+ DEFINE(IA64_SWITCH_STACK_F13_OFFSET, offsetof (struct switch_stack, f13));
+ DEFINE(IA64_SWITCH_STACK_F14_OFFSET, offsetof (struct switch_stack, f14));
+ DEFINE(IA64_SWITCH_STACK_F15_OFFSET, offsetof (struct switch_stack, f15));
+ DEFINE(IA64_SWITCH_STACK_F16_OFFSET, offsetof (struct switch_stack, f16));
+ DEFINE(IA64_SWITCH_STACK_F17_OFFSET, offsetof (struct switch_stack, f17));
+ DEFINE(IA64_SWITCH_STACK_F18_OFFSET, offsetof (struct switch_stack, f18));
+ DEFINE(IA64_SWITCH_STACK_F19_OFFSET, offsetof (struct switch_stack, f19));
+ DEFINE(IA64_SWITCH_STACK_F20_OFFSET, offsetof (struct switch_stack, f20));
+ DEFINE(IA64_SWITCH_STACK_F21_OFFSET, offsetof (struct switch_stack, f21));
+ DEFINE(IA64_SWITCH_STACK_F22_OFFSET, offsetof (struct switch_stack, f22));
+ DEFINE(IA64_SWITCH_STACK_F23_OFFSET, offsetof (struct switch_stack, f23));
+ DEFINE(IA64_SWITCH_STACK_F24_OFFSET, offsetof (struct switch_stack, f24));
+ DEFINE(IA64_SWITCH_STACK_F25_OFFSET, offsetof (struct switch_stack, f25));
+ DEFINE(IA64_SWITCH_STACK_F26_OFFSET, offsetof (struct switch_stack, f26));
+ DEFINE(IA64_SWITCH_STACK_F27_OFFSET, offsetof (struct switch_stack, f27));
+ DEFINE(IA64_SWITCH_STACK_F28_OFFSET, offsetof (struct switch_stack, f28));
+ DEFINE(IA64_SWITCH_STACK_F29_OFFSET, offsetof (struct switch_stack, f29));
+ DEFINE(IA64_SWITCH_STACK_F30_OFFSET, offsetof (struct switch_stack, f30));
+ DEFINE(IA64_SWITCH_STACK_F31_OFFSET, offsetof (struct switch_stack, f31));
+ DEFINE(IA64_SWITCH_STACK_R4_OFFSET, offsetof (struct switch_stack, r4));
+ DEFINE(IA64_SWITCH_STACK_R5_OFFSET, offsetof (struct switch_stack, r5));
+ DEFINE(IA64_SWITCH_STACK_R6_OFFSET, offsetof (struct switch_stack, r6));
+ DEFINE(IA64_SWITCH_STACK_R7_OFFSET, offsetof (struct switch_stack, r7));
+ DEFINE(IA64_SWITCH_STACK_B0_OFFSET, offsetof (struct switch_stack, b0));
+ DEFINE(IA64_SWITCH_STACK_B1_OFFSET, offsetof (struct switch_stack, b1));
+ DEFINE(IA64_SWITCH_STACK_B2_OFFSET, offsetof (struct switch_stack, b2));
+ DEFINE(IA64_SWITCH_STACK_B3_OFFSET, offsetof (struct switch_stack, b3));
+ DEFINE(IA64_SWITCH_STACK_B4_OFFSET, offsetof (struct switch_stack, b4));
+ DEFINE(IA64_SWITCH_STACK_B5_OFFSET, offsetof (struct switch_stack, b5));
+ DEFINE(IA64_SWITCH_STACK_AR_PFS_OFFSET, offsetof (struct switch_stack, ar_pfs));
+ DEFINE(IA64_SWITCH_STACK_AR_LC_OFFSET, offsetof (struct switch_stack, ar_lc));
+ DEFINE(IA64_SWITCH_STACK_AR_UNAT_OFFSET, offsetof (struct switch_stack, ar_unat));
+ DEFINE(IA64_SWITCH_STACK_AR_RNAT_OFFSET, offsetof (struct switch_stack, ar_rnat));
+ DEFINE(IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET, offsetof (struct switch_stack, ar_bspstore));
+ DEFINE(IA64_SWITCH_STACK_PR_OFFSET, offsetof (struct switch_stack, pr));
+
+ BLANK();
+
+ //DEFINE(IA64_SIGCONTEXT_IP_OFFSET, offsetof (struct sigcontext, sc_ip));
+ //DEFINE(IA64_SIGCONTEXT_AR_BSP_OFFSET, offsetof (struct sigcontext, sc_ar_bsp));
+ //DEFINE(IA64_SIGCONTEXT_AR_FPSR_OFFSET, offsetof (struct sigcontext, sc_ar_fpsr));
+ //DEFINE(IA64_SIGCONTEXT_AR_RNAT_OFFSET, offsetof (struct sigcontext, sc_ar_rnat));
+ //DEFINE(IA64_SIGCONTEXT_AR_UNAT_OFFSET, offsetof (struct sigcontext, sc_ar_unat));
+ //DEFINE(IA64_SIGCONTEXT_B0_OFFSET, offsetof (struct sigcontext, sc_br[0]));
+ //DEFINE(IA64_SIGCONTEXT_CFM_OFFSET, offsetof (struct sigcontext, sc_cfm));
+ //DEFINE(IA64_SIGCONTEXT_FLAGS_OFFSET, offsetof (struct sigcontext, sc_flags));
+ //DEFINE(IA64_SIGCONTEXT_FR6_OFFSET, offsetof (struct sigcontext, sc_fr[6]));
+ //DEFINE(IA64_SIGCONTEXT_PR_OFFSET, offsetof (struct sigcontext, sc_pr));
+ //DEFINE(IA64_SIGCONTEXT_R12_OFFSET, offsetof (struct sigcontext, sc_gr[12]));
+ //DEFINE(IA64_SIGCONTEXT_RBS_BASE_OFFSET,offsetof (struct sigcontext, sc_rbs_base));
+ //DEFINE(IA64_SIGCONTEXT_LOADRS_OFFSET, offsetof (struct sigcontext, sc_loadrs));
+
+ //BLANK();
+
+ //DEFINE(IA64_SIGPENDING_SIGNAL_OFFSET, offsetof (struct sigpending, signal));
+
+ //BLANK();
+
+ //DEFINE(IA64_SIGFRAME_ARG0_OFFSET, offsetof (struct sigframe, arg0));
+ //DEFINE(IA64_SIGFRAME_ARG1_OFFSET, offsetof (struct sigframe, arg1));
+ //DEFINE(IA64_SIGFRAME_ARG2_OFFSET, offsetof (struct sigframe, arg2));
+ //DEFINE(IA64_SIGFRAME_HANDLER_OFFSET, offsetof (struct sigframe, handler));
+ //DEFINE(IA64_SIGFRAME_SIGCONTEXT_OFFSET, offsetof (struct sigframe, sc));
+ //BLANK();
+ /* for assembly files which can't include sched.h: */
+ //DEFINE(IA64_CLONE_VFORK, CLONE_VFORK);
+ //DEFINE(IA64_CLONE_VM, CLONE_VM);
+
+ BLANK();
+ DEFINE(IA64_CPUINFO_NSEC_PER_CYC_OFFSET, offsetof (struct cpuinfo_ia64, nsec_per_cyc));
+ DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec));
+
+
+ DEFINE(CLONE_IDLETASK_BIT, 12);
+ DEFINE(CLONE_SETTLS_BIT, 19);
+//#if CLONE_SETTLS != (1<<19)
+//# error "CLONE_SETTLS_BIT incorrect, please fix"
+//#endif
+
+ //BLANK();
+ //DEFINE(IA64_MCA_TLB_INFO_SIZE, sizeof (struct ia64_mca_tlb_info));
+ /* used by head.S */
+ DEFINE(IA64_CPUINFO_NSEC_PER_CYC_OFFSET, offsetof (struct cpuinfo_ia64, nsec_per_cyc));
+
+ BLANK();
+ /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
+ //DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr));
+ //DEFINE(IA64_TIME_INTERPOLATOR_SOURCE_OFFSET, offsetof (struct time_interpolator, source));
+ //DEFINE(IA64_TIME_INTERPOLATOR_SHIFT_OFFSET, offsetof (struct time_interpolator, shift));
+ //DEFINE(IA64_TIME_INTERPOLATOR_NSEC_OFFSET, offsetof (struct time_interpolator, nsec_per_cyc));
+ //DEFINE(IA64_TIME_INTERPOLATOR_OFFSET_OFFSET, offsetof (struct time_interpolator, offset));
+ //DEFINE(IA64_TIME_INTERPOLATOR_LAST_CYCLE_OFFSET, offsetof (struct time_interpolator, last_cycle));
+ //DEFINE(IA64_TIME_INTERPOLATOR_LAST_COUNTER_OFFSET, offsetof (struct time_interpolator, last_counter));
+ //DEFINE(IA64_TIME_INTERPOLATOR_JITTER_OFFSET, offsetof (struct time_interpolator, jitter));
+ //DEFINE(IA64_TIME_INTERPOLATOR_MASK_OFFSET, offsetof (struct time_interpolator, mask));
+ //DEFINE(IA64_TIME_SOURCE_CPU, TIME_SOURCE_CPU);
+ //DEFINE(IA64_TIME_SOURCE_MMIO64, TIME_SOURCE_MMIO64);
+ //DEFINE(IA64_TIME_SOURCE_MMIO32, TIME_SOURCE_MMIO32);
+ //DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec));
+}
--- /dev/null
+/******************************************************************************
+ * Arch-specific dom0_ops.c
+ *
+ * Process command requests from domain-0 guest OS.
+ *
+ * Copyright (c) 2002, K A Fraser
+ */
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/lib.h>
+#include <xen/mm.h>
+#include <public/dom0_ops.h>
+#include <xen/sched.h>
+#include <xen/event.h>
+#include <asm/domain_page.h>
+//#include <asm/msr.h>
+#include <asm/pdb.h>
+#include <xen/trace.h>
+#include <xen/console.h>
+//#include <xen/shadow.h>
+#include <public/sched_ctl.h>
+
+#define TRC_DOM0OP_ENTER_BASE 0x00020000
+#define TRC_DOM0OP_LEAVE_BASE 0x00030000
+
+extern unsigned int alloc_new_dom_mem(struct domain *, unsigned int);
+
+static int msr_cpu_mask;
+static unsigned long msr_addr;
+static unsigned long msr_lo;
+static unsigned long msr_hi;
+
+/*
+ * Entry point for arch-specific dom0 control operations.  Only the
+ * privileged (control) domain may invoke these; no ia64-specific
+ * commands are implemented yet, so every command yields -ENOSYS.
+ */
+long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
+{
+    long rc;
+
+    if ( !IS_PRIV(current->domain) )
+        return -EPERM;
+
+    switch ( op->cmd )
+    {
+    default:
+        rc = -ENOSYS;
+        break;
+    }
+
+    return rc;
+}
+
+/*
+ * Fill in the execution context for the getdomaininfo control op.
+ * Not yet implemented on ia64; calls the dummy() placeholder so the
+ * missing functionality is noticed at run time.
+ * (Removed an unused local 'i'.)
+ */
+void arch_getdomaininfo_ctxt(struct domain *d, full_execution_context_t *c)
+{
+    dummy();
+}
--- /dev/null
+/*
+ * Xen domain firmware emulation support
+ * Copyright (C) 2004 Hewlett-Packard Co.
+ * Dan Magenheimer (dan.magenheimer@hp.com)
+ *
+ */
+
+#include <xen/config.h>
+#include <asm/system.h>
+#include <asm/pgalloc.h>
+
+#ifdef CONFIG_PCI
+# include <linux/pci.h>
+#endif
+
+#include <linux/efi.h>
+#include <asm/io.h>
+#include <asm/pal.h>
+#include <asm/sal.h>
+
+#include <asm/dom_fw.h>
+
+struct ia64_boot_param *dom_fw_init(struct domain *, char *,int,char *,int);
+extern unsigned long domain_mpa_to_imva(struct domain *,unsigned long mpaddr);
+extern struct domain *dom0;
+extern unsigned long dom0_start;
+
+extern unsigned long running_on_sim;
+
+
+unsigned long dom_fw_base_mpa = -1;
+unsigned long imva_fw_base = -1;
+
+// return domain (meta)physical address for a given imva
+// this function is a call-back from dom_fw_init
+// Valid imvas lie in [imva_fw_base, imva_fw_base + PAGE_SIZE); anything
+// else indicates a bug, so print a diagnostic and spin forever.
+unsigned long dom_pa(unsigned long imva)
+{
+	if (dom_fw_base_mpa == -1 || imva_fw_base == -1) {
+		printf("dom_pa: uninitialized! (spinning...)\n");
+		while(1);
+	}
+	// BUG FIX: was '>', but an offset equal to PAGE_SIZE is one past
+	// the end of the single firmware page and is also invalid.
+	// (An imva below imva_fw_base wraps to a huge unsigned offset and
+	// is caught by the same test.)
+	if (imva - imva_fw_base >= PAGE_SIZE) {
+		printf("dom_pa: bad offset! imva=%p, imva_fw_base=%p (spinning...)\n",imva,imva_fw_base);
+		while(1);
+	}
+	return dom_fw_base_mpa + (imva - imva_fw_base);
+}
+
+// Build a hypercall bundle (with stop bit) at the given domain
+// physical address; for dom0 the address is offset by dom0_start.
+void dom_efi_hypercall_patch(struct domain *d, unsigned long paddr, unsigned long hypercall)
+{
+	unsigned long addr = paddr;
+
+	if (d == dom0)
+		addr += dom0_start;
+	build_hypercall_bundle(domain_mpa_to_imva(d, addr),
+			       d->breakimm, hypercall, 1);
+}
+
+
+// Build a hypercall bundle at the given domain physical address,
+// with a caller-selected trailing return ('ret'); for dom0 the
+// address is offset by dom0_start.
+void dom_fw_hypercall_patch(struct domain *d, unsigned long paddr, unsigned long hypercall,unsigned long ret)
+{
+	unsigned long addr = paddr;
+
+	if (d == dom0)
+		addr += dom0_start;
+	build_hypercall_bundle(domain_mpa_to_imva(d, addr),
+			       d->breakimm, hypercall, ret);
+}
+
+
+// FIXME: This is really a hack: Forcing the boot parameter block
+// at domain mpaddr 0 page, then grabbing only the low bits of the
+// Xen imva, which is the offset into the page
+//
+// Builds the fake firmware for a domain in the page at (meta)physical
+// address 0 and returns the domain physical address of the resulting
+// boot parameter block.
+unsigned long dom_fw_setup(struct domain *d, char *args, int arglen)
+{
+	struct ia64_boot_param *bp;
+
+	dom_fw_base_mpa = 0;
+	if (d == dom0) dom_fw_base_mpa += dom0_start;
+	imva_fw_base = domain_mpa_to_imva(d,dom_fw_base_mpa);
+	/* BUG FIX: imva_fw_base is an unsigned long but dom_fw_init()
+	 * is declared to take a char * -- cast explicitly. */
+	bp = dom_fw_init(d, args, arglen, (char *) imva_fw_base, PAGE_SIZE);
+	return dom_pa((unsigned long)bp);
+}
+
+
+/* the following heavily leveraged from linux/arch/ia64/hp/sim/fw-emu.c */
+
+#define MB (1024*1024UL)
+
+#define NUM_EFI_SYS_TABLES 6
+#define PASS_THRU_IOPORT_SPACE
+#ifdef PASS_THRU_IOPORT_SPACE
+# define NUM_MEM_DESCS 4
+#else
+# define NUM_MEM_DESCS 3
+#endif
+
+
+#define SECS_PER_HOUR (60 * 60)
+#define SECS_PER_DAY (SECS_PER_HOUR * 24)
+
+/* Compute the `struct tm' representation of *T,
+ offset OFFSET seconds east of UTC,
+ and store year, yday, mon, mday, wday, hour, min, sec into *TP.
+ Return nonzero if successful. */
+/*
+ * Convert a count of seconds since the 1970 epoch (any east-of-UTC
+ * offset already applied by the caller) into the year/month/day/
+ * hour/minute/second fields of *tp.  Always returns 1 (success).
+ */
+int
+offtime (unsigned long t, efi_time_t *tp)
+{
+	/* Cumulative day-of-year at the start of each month. */
+	const unsigned short int __mon_yday[2][13] =
+	{
+		/* Normal years.  */
+		{ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
+		/* Leap years.  */
+		{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
+	};
+	long int days, rem, y;
+	const unsigned short int *ip;
+
+	days = t / SECS_PER_DAY;
+	rem = t % SECS_PER_DAY;
+	while (rem < 0) {
+		rem += SECS_PER_DAY;
+		--days;
+	}
+	while (rem >= SECS_PER_DAY) {
+		rem -= SECS_PER_DAY;
+		++days;
+	}
+	tp->hour = rem / SECS_PER_HOUR;
+	rem %= SECS_PER_HOUR;
+	tp->minute = rem / 60;
+	tp->second = rem % 60;
+	/* January 1, 1970 was a Thursday. */
+	y = 1970;
+
+# define DIV(a, b) ((a) / (b) - ((a) % (b) < 0))
+# define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400))
+# define __isleap(year) \
+  ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0))
+
+	/* Home in on the year: each pass corrects a 365-days-per-year
+	   guess by the leap days it missed; converges quickly. */
+	while (days < 0 || days >= (__isleap (y) ? 366 : 365)) {
+		/* Guess a corrected year, assuming 365 days per year.  */
+		long int yg = y + days / 365 - (days % 365 < 0);
+
+		/* Adjust DAYS and Y to match the guessed year.  */
+		days -= ((yg - y) * 365 + LEAPS_THRU_END_OF (yg - 1)
+			 - LEAPS_THRU_END_OF (y - 1));
+		y = yg;
+	}
+	tp->year = y;
+	/* Find the month by walking the cumulative table backwards. */
+	ip = __mon_yday[__isleap(y)];
+	for (y = 11; days < (long int) ip[y]; --y)
+		continue;
+	days -= ip[y];
+	tp->month = y + 1;
+	tp->day = days + 1;
+	return 1;
+}
+
+extern void pal_emulator_static (void);
+
+/* Macro to emulate SAL call using legacy IN and OUT calls to CF8, CFC etc.. */
+
+#define BUILD_CMD(addr) ((0x80000000 | (addr)) & ~3)
+
+#define REG_OFFSET(addr) (0x00000000000000FF & (addr))
+#define DEVICE_FUNCTION(addr) (0x000000000000FF00 & (addr))
+#define BUS_NUMBER(addr) (0x0000000000FF0000 & (addr))
+
+#ifndef XEN
+/*
+ * EFI get_time emulation for the HP simulator: read the time of day
+ * via the SSC_GET_TOD simulator call and convert it with offtime().
+ * Capabilities (*tc), if requested, are reported as all-zero.
+ * Compiled out for Xen (see the enclosing #ifndef XEN).
+ */
+static efi_status_t
+fw_efi_get_time (efi_time_t *tm, efi_time_cap_t *tc)
+{
+#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC)
+	struct {
+		int tv_sec;	/* must be 32bits to work */
+		int tv_usec;
+	} tv32bits;
+
+	ssc((unsigned long) &tv32bits, 0, 0, 0, SSC_GET_TOD);
+
+	memset(tm, 0, sizeof(*tm));
+	offtime(tv32bits.tv_sec, tm);
+
+	if (tc)
+		memset(tc, 0, sizeof(*tc));
+#else
+#	error Not implemented yet...
+#endif
+	return EFI_SUCCESS;
+}
+
+/*
+ * EFI reset_system emulation: on the HP simulator simply exit via the
+ * SSC_EXIT simulator call.  Compiled out for Xen.
+ */
+static void
+efi_reset_system (int reset_type, efi_status_t status, unsigned long data_size, efi_char16_t *data)
+{
+#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC)
+	ssc(status, 0, 0, 0, SSC_EXIT);
+#else
+#	error Not implemented yet...
+#endif
+}
+
+/* Catch-all stub for EFI runtime services that are not emulated. */
+static efi_status_t
+efi_unimplemented (void)
+{
+	return EFI_UNSUPPORTED;
+}
+#endif /* !XEN */
+
+/*
+ * Emulate the ia64 SAL (System Abstraction Layer) procedure-call
+ * interface for a guest.  Frequency queries return canned values,
+ * PCI config-space accesses are passed through to the real SAL for
+ * dom0 only, and all other known calls are logged and ignored.
+ * Returns the four-register SAL result (status, r9, r10, r11);
+ * status is 0 on success, -1 on failure.
+ */
+struct sal_ret_values
+sal_emulator (long index, unsigned long in1, unsigned long in2,
+	      unsigned long in3, unsigned long in4, unsigned long in5,
+	      unsigned long in6, unsigned long in7)
+{
+	long r9  = 0;
+	long r10 = 0;
+	long r11 = 0;
+	long status;
+
+	/*
+	 * Don't do a "switch" here since that gives us code that
+	 * isn't self-relocatable.
+	 */
+	status = 0;
+	if (index == SAL_FREQ_BASE) {
+		switch (in1) {
+		      case SAL_FREQ_BASE_PLATFORM:
+			/* canned 200MHz platform base frequency */
+			r9 = 200000000;
+			break;
+
+		      case SAL_FREQ_BASE_INTERVAL_TIMER:
+			/*
+			 * Is this supposed to be the cr.itc frequency
+			 * or something platform specific?  The SAL
+			 * doc ain't exactly clear on this...
+			 */
+			r9 = 700000000;
+			break;
+
+		      case SAL_FREQ_BASE_REALTIME_CLOCK:
+			r9 = 1;
+			break;
+
+		      default:
+			status = -1;
+			break;
+		}
+	} else if (index == SAL_PCI_CONFIG_READ) {
+		/* pass through to real SAL -- privileged domain only */
+		if (current->domain == dom0) {
+			u64 value;
+			// note that args 2&3 are swapped!!
+			status = ia64_sal_pci_config_read(in1,in3,in2,&value);
+			r9 = value;
+		}
+		else printf("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_READ\n");
+	} else if (index == SAL_PCI_CONFIG_WRITE) {
+		if (current->domain == dom0) {
+			/* sanity-check address/size/type before writing;
+			 * NOTE(review): %p is used below with unsigned long
+			 * arguments -- assumes Xen's printf formats %p as a
+			 * plain hex long; confirm against xen/common printf. */
+			if (((in1 & ~0xffffffffUL) && (in4 == 0)) ||
+			    (in4 > 1) ||
+			    (in2 > 8) || (in2 & (in2-1)))
+				printf("*** SAL_PCI_CONF_WRITE?!?(adr=%p,typ=%p,sz=%p,val=%p)\n",in1,in4,in2,in3);
+			// note that args are in a different order!!
+			status = ia64_sal_pci_config_write(in1,in4,in2,in3);
+		}
+		else printf("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_WRITE\n");
+	} else if (index == SAL_SET_VECTORS) {
+		printf("*** CALLED SAL_SET_VECTORS.  IGNORED...\n");
+	} else if (index == SAL_GET_STATE_INFO) {
+		printf("*** CALLED SAL_GET_STATE_INFO.  IGNORED...\n");
+	} else if (index == SAL_GET_STATE_INFO_SIZE) {
+		printf("*** CALLED SAL_GET_STATE_INFO_SIZE.  IGNORED...\n");
+	} else if (index == SAL_CLEAR_STATE_INFO) {
+		printf("*** CALLED SAL_CLEAR_STATE_INFO.  IGNORED...\n");
+	} else if (index == SAL_MC_RENDEZ) {
+		printf("*** CALLED SAL_MC_RENDEZ.  IGNORED...\n");
+	} else if (index == SAL_MC_SET_PARAMS) {
+		printf("*** CALLED SAL_MC_SET_PARAMS.  IGNORED...\n");
+	} else if (index == SAL_CACHE_FLUSH) {
+		printf("*** CALLED SAL_CACHE_FLUSH.  IGNORED...\n");
+	} else if (index == SAL_CACHE_INIT) {
+		printf("*** CALLED SAL_CACHE_INIT.  IGNORED...\n");
+	} else if (index == SAL_UPDATE_PAL) {
+		printf("*** CALLED SAL_UPDATE_PAL.  IGNORED...\n");
+	} else {
+		printf("*** CALLED SAL_ WITH UNKNOWN INDEX.  IGNORED...\n");
+		status = -1;
+	}
+	return ((struct sal_ret_values) {status, r9, r10, r11});
+}
+
+
+#define NFUNCPTRS 20
+
+/* Dump one EFI memory descriptor to the console (debug aid). */
+void print_md(efi_memory_desc_t *md)
+{
+#if 1
+	u64 range_end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
+	u64 mbytes = md->num_pages >> (20 - EFI_PAGE_SHIFT);
+
+	printk("domain mem: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
+	       md->type, md->attribute, md->phys_addr, range_end, mbytes);
+#endif
+}
+
+/*
+ * Build the fake firmware environment for a new domain inside the
+ * single page at fw_mem/fw_mem_size: an EFI system table whose runtime
+ * services are redirected to Xen hypercall bundles, a SAL system
+ * table, an EFI memory map describing the domain's memory, and the
+ * boot parameter block.  Returns the Xen-virtual address of the boot
+ * parameter block (callers convert it with dom_pa()).
+ */
+struct ia64_boot_param *
+dom_fw_init (struct domain *d, char *args, int arglen, char *fw_mem, int fw_mem_size)
+{
+	efi_system_table_t *efi_systab;
+	efi_runtime_services_t *efi_runtime;
+	efi_config_table_t *efi_tables;
+	struct ia64_sal_systab *sal_systab;
+	efi_memory_desc_t *efi_memmap, *md;
+	unsigned long *pal_desc, *sal_desc;
+	struct ia64_sal_desc_entry_point *sal_ed;
+	struct ia64_boot_param *bp;
+	unsigned long *pfn;
+	unsigned char checksum = 0;
+	char *cp, *cmd_line, *fw_vendor;
+	int i = 0;
+	unsigned long maxmem = d->max_pages * PAGE_SIZE;
+	unsigned long start_mpaddr = ((d==dom0)?dom0_start:0);
+
+	/* Append one descriptor to the memory map being built. */
+# define MAKE_MD(typ, attr, start, end) 	\
+	do {	 				\
+		md = efi_memmap + i++;		\
+		md->type = typ;			\
+		md->pad = 0;			\
+		md->phys_addr = start_mpaddr + start;	\
+		md->virt_addr = 0;		\
+		md->num_pages = (end - start) >> 12;	\
+		md->attribute = attr;		\
+		print_md(md);			\
+	} while (0)
+
+/* FIXME: should check size but for now we have a whole MB to play with.
+   And if stealing code from fw-emu.c, watch out for new fw_vendor on the end!
+	if (fw_mem_size < sizeof(fw_mem_proto)) {
+		printf("sys_fw_init: insufficient space for fw_mem\n");
+		return 0;
+	}
+*/
+	memset(fw_mem, 0, fw_mem_size);
+
+#ifdef XEN
+#else
+	pal_desc = (unsigned long *) &pal_emulator_static;
+	sal_desc = (unsigned long *) &sal_emulator;
+#endif
+
+	/* Carve the firmware page up into its component tables. */
+	cp = fw_mem;
+	efi_systab  = (void *) cp; cp += sizeof(*efi_systab);
+	efi_runtime = (void *) cp; cp += sizeof(*efi_runtime);
+	efi_tables  = (void *) cp; cp += NUM_EFI_SYS_TABLES * sizeof(*efi_tables);
+	sal_systab  = (void *) cp; cp += sizeof(*sal_systab);
+	sal_ed      = (void *) cp; cp += sizeof(*sal_ed);
+	efi_memmap  = (void *) cp; cp += NUM_MEM_DESCS*sizeof(*efi_memmap);
+	bp          = (void *) cp; cp += sizeof(*bp);
+	pfn         = (void *) cp; cp += NFUNCPTRS * 2 * sizeof(pfn);
+	cmd_line    = (void *) cp;
+
+	if (args) {
+		if (arglen >= 1024)
+			arglen = 1023;
+		memcpy(cmd_line, args, arglen);
+	} else {
+		arglen = 0;
+	}
+	cmd_line[arglen] = '\0';
+
+	/* BUG FIX: was sizeof(efi_systab), which zeroed only the 8 bytes
+	 * of the pointer itself rather than the whole system table. */
+	memset(efi_systab, 0, sizeof(*efi_systab));
+	efi_systab->hdr.signature = EFI_SYSTEM_TABLE_SIGNATURE;
+	efi_systab->hdr.revision  = EFI_SYSTEM_TABLE_REVISION;
+	efi_systab->hdr.headersize = sizeof(efi_systab->hdr);
+	cp = fw_vendor = &cmd_line[arglen] + (2-(arglen&1)); // round to 16-bit boundary
+#define FW_VENDOR "X\0e\0n\0/\0i\0a\0\066\0\064\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+	cp += sizeof(FW_VENDOR) + (8-((unsigned long)cp & 7)); // round to 64-bit boundary
+
+	memcpy(fw_vendor,FW_VENDOR,sizeof(FW_VENDOR));
+	efi_systab->fw_vendor = dom_pa((unsigned long) fw_vendor);
+
+	efi_systab->fw_revision = 1;
+	efi_systab->runtime = (void *) dom_pa((unsigned long) efi_runtime);
+	efi_systab->nr_tables = NUM_EFI_SYS_TABLES;
+	efi_systab->tables = dom_pa((unsigned long) efi_tables);
+
+	efi_runtime->hdr.signature = EFI_RUNTIME_SERVICES_SIGNATURE;
+	efi_runtime->hdr.revision = EFI_RUNTIME_SERVICES_REVISION;
+	efi_runtime->hdr.headersize = sizeof(efi_runtime->hdr);
+	/* Patch a hypercall bundle into the domain at the fixed firmware
+	 * address for 'call' and point the runtime-services function
+	 * descriptor (an {entry, gp} pair at *pfn) at it. */
+#define EFI_HYPERCALL_PATCH(tgt,call) do { \
+	dom_efi_hypercall_patch(d,FW_HYPERCALL_##call##_PADDR,FW_HYPERCALL_##call); \
+	tgt = dom_pa((unsigned long) pfn); \
+	*pfn++ = FW_HYPERCALL_##call##_PADDR + ((d==dom0)?dom0_start:0); \
+	*pfn++ = 0; \
+	} while (0)
+
+	EFI_HYPERCALL_PATCH(efi_runtime->get_time,EFI_GET_TIME);
+	EFI_HYPERCALL_PATCH(efi_runtime->set_time,EFI_SET_TIME);
+	EFI_HYPERCALL_PATCH(efi_runtime->get_wakeup_time,EFI_GET_WAKEUP_TIME);
+	EFI_HYPERCALL_PATCH(efi_runtime->set_wakeup_time,EFI_SET_WAKEUP_TIME);
+	EFI_HYPERCALL_PATCH(efi_runtime->set_virtual_address_map,EFI_SET_VIRTUAL_ADDRESS_MAP);
+	EFI_HYPERCALL_PATCH(efi_runtime->get_variable,EFI_GET_VARIABLE);
+	EFI_HYPERCALL_PATCH(efi_runtime->get_next_variable,EFI_GET_NEXT_VARIABLE);
+	EFI_HYPERCALL_PATCH(efi_runtime->set_variable,EFI_SET_VARIABLE);
+	EFI_HYPERCALL_PATCH(efi_runtime->get_next_high_mono_count,EFI_GET_NEXT_HIGH_MONO_COUNT);
+	EFI_HYPERCALL_PATCH(efi_runtime->reset_system,EFI_RESET_SYSTEM);
+
+	efi_tables[0].guid = SAL_SYSTEM_TABLE_GUID;
+	efi_tables[0].table = dom_pa((unsigned long) sal_systab);
+	for (i = 1; i < NUM_EFI_SYS_TABLES; i++) {
+		efi_tables[i].guid = NULL_GUID;
+		efi_tables[i].table = 0;
+	}
+	/* For dom0, pass the real machine's firmware tables through. */
+	if (d == dom0) {
+		printf("Domain0 EFI passthrough:");
+		i = 1;
+		if (efi.mps) {
+			efi_tables[i].guid = MPS_TABLE_GUID;
+			efi_tables[i].table = __pa(efi.mps);
+			/* BUG FIX (here and below): format was "%0xlx",
+			 * which prints the u64 as a hex int followed by a
+			 * literal "lx"; "%lx" matches the argument. */
+			printf(" MPS=%lx",efi_tables[i].table);
+			i++;
+		}
+		if (efi.acpi20) {
+			efi_tables[i].guid = ACPI_20_TABLE_GUID;
+			efi_tables[i].table = __pa(efi.acpi20);
+			printf(" ACPI 2.0=%lx",efi_tables[i].table);
+			i++;
+		}
+		if (efi.acpi) {
+			efi_tables[i].guid = ACPI_TABLE_GUID;
+			efi_tables[i].table = __pa(efi.acpi);
+			printf(" ACPI=%lx",efi_tables[i].table);
+			i++;
+		}
+		if (efi.smbios) {
+			efi_tables[i].guid = SMBIOS_TABLE_GUID;
+			efi_tables[i].table = __pa(efi.smbios);
+			printf(" SMBIOS=%lx",efi_tables[i].table);
+			i++;
+		}
+		if (efi.hcdp) {
+			efi_tables[i].guid = HCDP_TABLE_GUID;
+			efi_tables[i].table = __pa(efi.hcdp);
+			printf(" HCDP=%lx",efi_tables[i].table);
+			i++;
+		}
+		printf("\n");
+	}
+
+	/* fill in the SAL system table: */
+	memcpy(sal_systab->signature, "SST_", 4);
+	sal_systab->size = sizeof(*sal_systab);
+	sal_systab->sal_rev_minor = 1;
+	sal_systab->sal_rev_major = 0;
+	sal_systab->entry_count = 1;
+
+	strcpy(sal_systab->oem_id, "Xen/ia64");
+	strcpy(sal_systab->product_id, "Xen/ia64");
+
+	/* fill in an entry point: */
+	sal_ed->type = SAL_DESC_ENTRY_POINT;
+#define FW_HYPERCALL_PATCH(tgt,call,ret) do { \
+	dom_fw_hypercall_patch(d,FW_HYPERCALL_##call##_PADDR,FW_HYPERCALL_##call,ret); \
+	tgt = FW_HYPERCALL_##call##_PADDR + ((d==dom0)?dom0_start:0); \
+	} while (0)
+	FW_HYPERCALL_PATCH(sal_ed->pal_proc,PAL_CALL,0);
+	FW_HYPERCALL_PATCH(sal_ed->sal_proc,SAL_CALL,1);
+	sal_ed->gp = 0;  // will be ignored
+
+	/* SAL checksum: the byte sum of the table must come out zero. */
+	for (cp = (char *) sal_systab; cp < (char *) efi_memmap; ++cp)
+		checksum += *cp;
+
+	sal_systab->checksum = -checksum;
+
+	/* simulate 1MB free memory at physical address zero */
+	i = 0;
+	MAKE_MD(EFI_BOOT_SERVICES_DATA,EFI_MEMORY_WB,0*MB,1*MB);
+	/* hypercall patches live here, masquerade as reserved PAL memory */
+	MAKE_MD(EFI_PAL_CODE,EFI_MEMORY_WB,HYPERCALL_START,HYPERCALL_END);
+	MAKE_MD(EFI_CONVENTIONAL_MEMORY,EFI_MEMORY_WB,HYPERCALL_END,maxmem);
+#ifdef PASS_THRU_IOPORT_SPACE
+	if (d == dom0 && !running_on_sim) {
+		/* pass through the I/O port space */
+		efi_memory_desc_t efi_get_io_md(void);
+		efi_memory_desc_t ia64_efi_io_md = efi_get_io_md();
+		u32 type;
+		u64 iostart, ioend, ioattr;
+
+		type = ia64_efi_io_md.type;
+		iostart = ia64_efi_io_md.phys_addr;
+		ioend = ia64_efi_io_md.phys_addr +
+			(ia64_efi_io_md.num_pages << 12);
+		ioattr = ia64_efi_io_md.attribute;
+		MAKE_MD(type,ioattr,iostart,ioend);
+	}
+	else
+		MAKE_MD(EFI_RESERVED_TYPE,0,0,0);
+#endif
+
+	bp->efi_systab = dom_pa((unsigned long) fw_mem);
+	bp->efi_memmap = dom_pa((unsigned long) efi_memmap);
+	bp->efi_memmap_size = NUM_MEM_DESCS*sizeof(efi_memory_desc_t);
+	bp->efi_memdesc_size = sizeof(efi_memory_desc_t);
+	bp->efi_memdesc_version = 1;
+	bp->command_line = dom_pa((unsigned long) cmd_line);
+	bp->console_info.num_cols = 80;
+	bp->console_info.num_rows = 25;
+	bp->console_info.orig_x = 0;
+	bp->console_info.orig_y = 24;
+	bp->fpswa = 0;
+
+	return bp;
+}
--- /dev/null
+/*
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ */
+
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <xen/errno.h>
+#include <xen/sched.h>
+#include <xen/smp.h>
+#include <xen/delay.h>
+#include <xen/softirq.h>
+#include <xen/mm.h>
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/desc.h>
+//#include <asm/mpspec.h>
+#include <xen/irq.h>
+#include <xen/event.h>
+//#include <xen/shadow.h>
+#include <xen/console.h>
+
+#include <xen/elf.h>
+//#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/dma.h> /* for MAX_DMA_ADDRESS */
+
+#include <asm/asm-offsets.h> /* for IA64_THREAD_INFO_SIZE */
+
+#include <asm/vcpu.h> /* for function declarations */
+
+#define CONFIG_DOMAIN0_CONTIGUOUS
+unsigned long dom0_start = -1L;
+unsigned long dom0_size = 512*1024*1024; //FIXME: Should be configurable
+//FIXME: alignment should be 256MB, lest Linux use a 256MB page size
+unsigned long dom0_align = 64*1024*1024;
+
+extern kmem_cache_t *domain_struct_cachep;
+
+// initialized by arch/ia64/setup.c:find_initrd()
+unsigned long initrd_start = 0, initrd_end = 0;
+
+extern int loadelfimage(char *);
+extern int readelfimage_base_and_size(char *, unsigned long,
+ unsigned long *, unsigned long *, unsigned long *);
+
+unsigned long map_domain_page0(struct domain *);
+extern unsigned long dom_fw_setup(struct domain *, char *, int);
+
+/* this belongs in include/asm, but there doesn't seem to be a suitable place */
+/*
+ * Free the per-domain page tables on domain teardown.  Not yet
+ * implemented on ia64 -- dummy() flags the missing functionality;
+ * the x86-style free_page() call is retained, commented out, as a
+ * reminder of what is eventually needed.
+ */
+void free_perdomain_pt(struct domain *d)
+{
+	dummy();
+	//free_page((unsigned long)d->mm.perdomain_pt);
+}
+
+int hlt_counter;
+
+/* Forbid halting in the idle loop (reference-counted). */
+void disable_hlt(void)
+{
+	hlt_counter += 1;
+}
+
+/* Re-permit halting in the idle loop (pairs with disable_hlt). */
+void enable_hlt(void)
+{
+	hlt_counter -= 1;
+}
+
+/*
+ * One idle-loop iteration: if halting is permitted and no softirq is
+ * pending, halt the CPU until the next interrupt.  Interrupts are
+ * disabled around the pending-softirq test so a softirq raised
+ * between the test and the halt cannot be missed.  Note the
+ * commented-out "//else": local_irq_enable() deliberately runs on
+ * every path, both after safe_halt() and when a softirq was pending.
+ */
+static void default_idle(void)
+{
+    if ( hlt_counter == 0 )
+    {
+        local_irq_disable();
+        if ( !softirq_pending(smp_processor_id()) )
+            safe_halt();
+        //else
+            local_irq_enable();
+    }
+}
+
+/*
+ * The idle loop proper: wait (possibly halted, via default_idle) until
+ * a softirq is raised on this CPU, service it, and repeat forever.
+ */
+void continue_cpu_idle_loop(void)
+{
+    int cpu = smp_processor_id();
+    for ( ; ; )
+    {
+#ifdef IA64
+// __IRQ_STAT(cpu, idle_timestamp) = jiffies
+#else
+        irq_stat[cpu].idle_timestamp = jiffies;
+#endif
+        while ( !softirq_pending(cpu) )
+            default_idle();
+        do_softirq();
+    }
+}
+
+/*
+ * Turn the boot thread into this CPU's idle loop: unpause, enter the
+ * scheduler once, mark the CPU initialised, then never return from
+ * continue_cpu_idle_loop().
+ */
+void startup_cpu_idle_loop(void)
+{
+    /* Just some sanity to ensure that the scheduler is set up okay. */
+    ASSERT(current->domain == IDLE_DOMAIN_ID);
+    domain_unpause_by_systemcontroller(current);
+    __enter_scheduler();
+
+    /*
+     * Declares CPU setup done to the boot processor.
+     * Therefore memory barrier to ensure state is visible.
+     */
+    smp_mb();
+    init_idle();
+#if 0
+//do we have to ensure the idle task has a shared page so that, for example,
+//region registers can be loaded from it. Apparently not...
+    idle0_task.shared_info = (void *)alloc_xenheap_page();
+    memset(idle0_task.shared_info, 0, PAGE_SIZE);
+    /* pin mapping */
+    // FIXME: Does this belong here?  Or do only at domain switch time?
+    {
+        /* WARNING: following must be inlined to avoid nested fault */
+        unsigned long psr = ia64_clear_ic();
+        ia64_itr(0x2, IA64_TR_SHARED_INFO, SHAREDINFO_ADDR,
+            pte_val(pfn_pte(ia64_tpa(idle0_task.shared_info) >> PAGE_SHIFT, PAGE_KERNEL)),
+            PAGE_SHIFT);
+        ia64_set_psr(psr);
+        ia64_srlz_i();
+    }
+#endif
+
+    continue_cpu_idle_loop();
+}
+
+/* Allocate a struct domain from the dedicated slab cache. */
+struct domain *arch_alloc_domain_struct(void)
+{
+	struct domain *d = xmem_cache_alloc(domain_struct_cachep);
+
+	return d;
+}
+
+/* Return a struct domain to its slab cache. */
+void arch_free_domain_struct(struct domain *d)
+{
+	xmem_cache_free(domain_struct_cachep, d);
+}
+
+/* Allocate an exec_domain; on ia64 this is a full task struct
+ * (stack + register backing store), hence alloc_task_struct(). */
+struct exec_domain *arch_alloc_exec_domain_struct(void)
+{
+	struct exec_domain *ed = alloc_task_struct();
+
+	return ed;
+}
+
+/* Release an exec_domain allocated by arch_alloc_exec_domain_struct. */
+void arch_free_exec_domain_struct(struct exec_domain *ed)
+{
+	free_task_struct(ed);
+}
+
+/*
+ * Arch-specific initialization of a newly created domain: allocate
+ * and clear the shared_info and vcpu_info pages, assign RID ranges,
+ * and fill in the (currently fixed) virtual address-space layout.
+ */
+void arch_do_createdomain(struct exec_domain *ed)
+{
+	struct domain *d = ed->domain;
+
+	d->shared_info = (void *)alloc_xenheap_page();
+	/* BUG FIX: the shared_info allocation was previously neither
+	 * checked for failure nor zeroed; treat it like vcpu_info. */
+	if (!d->shared_info) {
+		printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
+		while (1);
+	}
+	memset(d->shared_info, 0, PAGE_SIZE);
+	ed->vcpu_info = (void *)alloc_xenheap_page();
+	if (!ed->vcpu_info) {
+		printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
+		while (1);
+	}
+	memset(ed->vcpu_info, 0, PAGE_SIZE);
+	/* pin mapping */
+	// FIXME: Does this belong here?  Or do only at domain switch time?
+#if 0
+	// this is now done in ia64_new_rr7
+	{
+		/* WARNING: following must be inlined to avoid nested fault */
+		unsigned long psr = ia64_clear_ic();
+		ia64_itr(0x2, IA64_TR_SHARED_INFO, SHAREDINFO_ADDR,
+			 pte_val(pfn_pte(ia64_tpa(d->shared_info) >> PAGE_SHIFT, PAGE_KERNEL)),
+			 PAGE_SHIFT);
+		ia64_set_psr(psr);
+		ia64_srlz_i();
+	}
+#endif
+	d->max_pages = (128*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
+	if ((d->metaphysical_rid = allocate_metaphysical_rid()) == -1UL)
+		BUG();
+	ed->vcpu_info->arch.metaphysical_mode = 1;
+#define DOMAIN_RID_BITS_DEFAULT 18
+	if (!allocate_rid_range(d,DOMAIN_RID_BITS_DEFAULT)) // FIXME
+		BUG();
+	// the following will eventually need to be negotiated dynamically
+	d->xen_vastart = 0xfffc000000000000;
+	d->xen_vaend = 0xfffe000000000000;
+	d->shared_info_va = 0xfffd000000000000;
+	d->breakimm = 0x1000;
+	// stay on kernel stack because may get interrupts!
+	// ia64_ret_from_clone (which b0 gets in new_thread) switches
+	// to user stack
+	ed->thread.on_ustack = 0;
+}
+
+/* Nothing arch-specific to do (yet) when booting an extra vcpu. */
+void arch_do_boot_vcpu(struct exec_domain *p)
+{
+}
+
+/*
+ * Final arch-specific guest setup from a builder-supplied context.
+ * Not yet implemented on ia64; dummy() flags the call and 1 is
+ * returned unconditionally.
+ */
+int arch_final_setup_guest(struct exec_domain *p, full_execution_context_t *c)
+{
+	dummy();
+	return 1;
+}
+
+/*
+ * Give back a dying domain's memory.  Not yet implemented on ia64;
+ * dummy() flags the missing functionality at run time.
+ */
+void domain_relinquish_memory(struct domain *d)
+{
+	dummy();
+}
+
+// heavily leveraged from linux/arch/ia64/kernel/process.c:copy_thread()
+// and linux/arch/ia64/kernel/process.c:kernel_thread()
+/*
+ * Set up the initial pt_regs and switch_stack for a new domain vcpu
+ * so that the first context switch to it "returns" through
+ * ia64_ret_from_clone and starts executing at start_pc at PL2.
+ */
+void new_thread(struct exec_domain *ed,
+	            unsigned long start_pc,
+	            unsigned long start_stack,
+	            unsigned long start_info)
+{
+	struct domain *d = ed->domain;
+	struct switch_stack *sw;
+	struct pt_regs *regs;
+	unsigned long new_rbs;
+	struct ia64_boot_param *bp;
+	extern char ia64_ret_from_clone;
+	extern char saved_command_line[];
+
+#ifdef CONFIG_DOMAIN0_CONTIGUOUS
+	if (d == dom0) start_pc += dom0_start;
+#endif
+	regs = (struct pt_regs *) ((unsigned long) ed + IA64_STK_OFFSET) - 1;
+	sw = (struct switch_stack *) regs - 1;
+	new_rbs = (unsigned long) ed + IA64_RBS_OFFSET;
+	/* BUG FIX: '&' binds tighter than '|', so the un-parenthesized
+	 * original applied ~(BITS_TO_CLEAR|RI|IS) only to IA64_PSR_BN
+	 * and never cleared those bits from the PSR.  Parenthesized as
+	 * in the Linux copy_thread() this code derives from. */
+	regs->cr_ipsr = ((ia64_getreg(_IA64_REG_PSR)
+		| IA64_PSR_BITS_TO_SET | IA64_PSR_BN)
+		& ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));
+	regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT; // domain runs at PL2
+	regs->cr_iip = start_pc;
+	regs->ar_rsc = 0xf;		/* eager mode, privilege level 1 */
+	regs->ar_rnat = 0;
+	regs->ar_fpsr = sw->ar_fpsr = FPSR_DEFAULT;
+	regs->loadrs = 0;
+	//regs->r8 = current->mm->dumpable;	/* set "don't zap registers" flag */
+	//regs->r8 = 0x01234567890abcdef;	// FIXME: temp marker
+	//regs->r12 = ((unsigned long) regs - 16);	/* 16 byte scratch */
+	regs->cr_ifs = 1UL << 63;
+	regs->pr = 0;
+	sw->pr = 0;
+	regs->ar_pfs = 0;
+	sw->ar_pfs = 0;
+	sw->ar_bspstore = new_rbs;
+	//regs->r13 = (unsigned long) ed;
+printf("new_thread: ed=%p, regs=%p, sw=%p, new_rbs=%p, IA64_STK_OFFSET=%p, &r8=%p\n",
+ed,regs,sw,new_rbs,IA64_STK_OFFSET,&regs->r8);
+	sw->b0 = (unsigned long) &ia64_ret_from_clone;
+	ed->thread.ksp = (unsigned long) sw - 16;
+	//ed->thread_info->flags = 0;
+printk("new_thread, about to call init_all_rr\n");
+	init_all_rr(ed);
+	// set up boot parameters (and fake firmware)
+printk("new_thread, about to call dom_fw_setup\n");
+	regs->r28 = dom_fw_setup(d,saved_command_line,256L);	//FIXME
+printk("new_thread, done with dom_fw_setup\n");
+	// don't forget to set this!
+	ed->vcpu_info->arch.banknum = 1;
+}
+
+/*
+ * dom0 is physically contiguous at [dom0_start, dom0_start+dom0_size),
+ * so "allocating" a dom0 page is just a bounds check plus a direct
+ * pfn-to-page conversion; out-of-range addresses are a bug, so print
+ * diagnostics and spin.
+ */
+static struct page * map_new_domain0_page(unsigned long mpaddr)
+{
+	if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
+		printk("map_new_domain0_page: bad domain0 mpaddr %p!\n",mpaddr);
+printk("map_new_domain0_page: start=%p,end=%p!\n",dom0_start,dom0_start+dom0_size);
+		while(1);
+	}
+	return pfn_to_page((mpaddr >> PAGE_SHIFT));
+}
+
+/* allocate new page for domain and map it to the specified metaphysical addr */
+/*
+ * Walks (and populates, as needed) the domain's pgd/pmd/pte hierarchy
+ * for mpaddr; on an empty pte, obtains a page (dom0: fixed contiguous
+ * region; others: fresh allocation) and installs a PL2 RWX mapping.
+ * Returns the mapped page, or NULL on allocation failure or if the
+ * domain has no pgd yet.
+ */
+struct page * map_new_domain_page(struct domain *d, unsigned long mpaddr)
+{
+	struct mm_struct *mm = d->arch.mm;
+	struct page *p = (struct page *)0;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+extern unsigned long vhpt_paddr, vhpt_pend;
+
+	if (!mm->pgd) {
+		printk("map_new_domain_page: domain pgd must exist!\n");
+		return(p);
+	}
+	pgd = pgd_offset(mm,mpaddr);
+	if (pgd_none(*pgd))
+		pgd_populate(mm, pgd, pmd_alloc_one(mm,mpaddr));
+
+	pmd = pmd_offset(pgd, mpaddr);
+	if (pmd_none(*pmd))
+		pmd_populate(mm, pmd, pte_alloc_one(mm,mpaddr));
+
+	pte = pte_offset_map(pmd, mpaddr);
+	if (pte_none(*pte)) {
+#ifdef CONFIG_DOMAIN0_CONTIGUOUS
+		if (d == dom0) p = map_new_domain0_page(mpaddr);
+		else
+#endif
+			p = alloc_page(GFP_KERNEL);
+		if (unlikely(!p)) {
+printf("map_new_domain_page: Can't alloc!!!! Aaaargh!\n");
+			return(p);
+		}
+/* Debug check: a newly handed-out page should never overlap the VHPT. */
+if (unlikely(page_to_phys(p) > vhpt_paddr && page_to_phys(p) < vhpt_pend)) {
+  printf("map_new_domain_page: reassigned vhpt page %p!!\n",page_to_phys(p));
+}
+		set_pte(pte, pfn_pte(page_to_phys(p) >> PAGE_SHIFT,
+			__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
+	}
+	/* NOTE(review): on this path p is still NULL (it was never
+	 * assigned), so the message prints 0, not the mapped page --
+	 * misleading but harmless; the existing mapping is kept. */
+	else printk("map_new_domain_page: page %p already mapped!\n",p);
+	return p;
+}
+
+/* debug hook: enable privileged-operation tracing when a magic
+ metaphysical address (0x3800) is looked up -- development aid only */
+void mpafoo(unsigned long mpaddr)
+{
+ extern unsigned long privop_trace;
+ if (mpaddr == 0x3800)
+ privop_trace = 1;
+}
+
+/*
+ * lookup_domain_mpa: return the pte (as an unsigned long) mapping the
+ * given metaphysical address of domain d.  For the contiguous-dom0 case
+ * the pte is synthesized directly from mpaddr; otherwise the domain's
+ * page table is walked, allocating a missing page on demand when mpaddr
+ * falls below d->max_pages.  Returns 0 on failure.
+ */
+unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr)
+{
+ struct mm_struct *mm = d->arch.mm;
+ pgd_t *pgd = pgd_offset(mm, mpaddr);
+ pmd_t *pmd;
+ pte_t *pte;
+
+#ifdef CONFIG_DOMAIN0_CONTIGUOUS
+ if (d == dom0) {
+ if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
+ //printk("lookup_domain_mpa: bad dom0 mpaddr %p!\n",mpaddr);
+//printk("lookup_domain_mpa: start=%p,end=%p!\n",dom0_start,dom0_start+dom0_size);
+ mpafoo(mpaddr);
+ }
+ pte_t pteval = pfn_pte(mpaddr >> PAGE_SHIFT,
+ __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX));
+ pte = &pteval;
+ return *(unsigned long *)pte;
+ }
+#endif
+tryagain:
+ if (pgd_present(*pgd)) {
+ pmd = pmd_offset(pgd,mpaddr);
+ if (pmd_present(*pmd)) {
+ pte = pte_offset_map(pmd,mpaddr);
+ if (pte_present(*pte)) {
+//printk("lookup_domain_page: found mapping for %lx, pte=%lx\n",mpaddr,pte_val(*pte));
+ return *(unsigned long *)pte;
+ }
+ }
+ }
+ /* if lookup fails and mpaddr is "legal", "create" the page */
+ if ((mpaddr >> PAGE_SHIFT) < d->max_pages) {
+ // FIXME: should zero out pages for security reasons
+ if (map_new_domain_page(d,mpaddr)) goto tryagain;
+ }
+ /* fixed: format string was missing its closing parenthesis */
+ printk("lookup_domain_mpa: bad mpa %p (> %p)\n",
+ mpaddr,d->max_pages<<PAGE_SHIFT);
+ mpafoo(mpaddr);
+ return 0;
+}
+
+// FIXME: ONLY USE FOR DOMAIN PAGE_SIZE == PAGE_SIZE
+/* translate a domain metaphysical address into a Xen-mapped virtual
+ address (imva), preserving the byte offset within the page.
+ NOTE(review): a failed lookup returns pte 0, which is not checked here. */
+unsigned long domain_mpa_to_imva(struct domain *d, unsigned long mpaddr)
+{
+ unsigned long pte = lookup_domain_mpa(d,mpaddr);
+ unsigned long imva;
+
+ pte &= _PAGE_PPN_MASK; /* keep only the physical page-number bits */
+ imva = __va(pte);
+ imva |= mpaddr & ~PAGE_MASK; /* re-attach the in-page offset */
+ return(imva);
+}
+
+// remove following line if not privifying in memory
+//#define HAVE_PRIVIFY_MEMORY
+#ifndef HAVE_PRIVIFY_MEMORY
+#define privify_memory(x,y) do {} while(0)
+#endif
+
+// see arch/x86/xxx/domain_build.c
+// see arch/x86/xxx/domain_build.c
+/* minimal ELF validation: just the magic bytes */
+int elf_sanity_check(Elf_Ehdr *ehdr)
+{
+ return (IS_ELF(*ehdr));
+}
+
+/*
+ * loaddomainelfimage: copy every PT_LOAD segment of the ELF image at
+ * image_start into domain d's metaphysical memory, zero-filling the
+ * bss (memsz > filesz) portion.  For the contiguous dom0 case the copy
+ * is one memcpy; otherwise pages are allocated/mapped one at a time.
+ */
+void loaddomainelfimage(struct domain *d, unsigned long image_start)
+{
+ char *elfbase = image_start;
+ Elf_Ehdr *ehdr = (Elf_Ehdr *)image_start;
+ Elf_Phdr *phdr;
+ int h, filesz, memsz, paddr;
+ unsigned long elfaddr, dom_mpaddr, dom_imva;
+ struct page *p;
+ 
+ for ( h = 0; h < ehdr->e_phnum; h++ ) {
+ phdr = (Elf_Phdr *)(elfbase + ehdr->e_phoff + (h*ehdr->e_phentsize));
+ //if ( !is_loadable_phdr(phdr) )
+ if ((phdr->p_type != PT_LOAD)) {
+ continue;
+ }
+ filesz = phdr->p_filesz; memsz = phdr->p_memsz;
+ elfaddr = elfbase + phdr->p_offset;
+ dom_mpaddr = phdr->p_paddr;
+//printf("p_offset: %x, size=%x\n",elfaddr,filesz);
+#ifdef CONFIG_DOMAIN0_CONTIGUOUS
+ if (d == dom0) {
+ if (dom_mpaddr+memsz>dom0_size || dom_mpaddr+filesz>dom0_size) {
+ printf("Domain0 doesn't fit in allocated space!\n");
+ while(1);
+ }
+ dom_imva = __va(dom_mpaddr + dom0_start);
+ memcpy(dom_imva,elfaddr,filesz);
+ if (memsz > filesz) memset(dom_imva+filesz,0,memsz-filesz);
+//FIXME: This test for code seems to find a lot more than objdump -x does
+ if (phdr->p_flags & PF_X) privify_memory(dom_imva,filesz);
+ }
+ else
+#endif
+ /* page-at-a-time path for non-contiguous domains */
+ while (memsz > 0) {
+ p = map_new_domain_page(d,dom_mpaddr);
+ if (unlikely(!p)) BUG();
+ dom_imva = __va(page_to_phys(p));
+ if (filesz > 0) {
+ if (filesz >= PAGE_SIZE)
+ memcpy(dom_imva,elfaddr,PAGE_SIZE);
+ else { // copy partial page, zero the rest of page
+ memcpy(dom_imva,elfaddr,filesz);
+ memset(dom_imva+filesz,0,PAGE_SIZE-filesz);
+ }
+//FIXME: This test for code seems to find a lot more than objdump -x does
+ if (phdr->p_flags & PF_X)
+ privify_memory(dom_imva,PAGE_SIZE);
+ }
+ else if (memsz > 0) // always zero out entire page
+ memset(dom_imva,0,PAGE_SIZE);
+ memsz -= PAGE_SIZE; filesz -= PAGE_SIZE;
+ elfaddr += PAGE_SIZE; dom_mpaddr += PAGE_SIZE;
+ }
+ }
+}
+
+
+/* reserve dom0's memory early from the bootmem allocator (contiguous
+ config only); failure is fatal since dom0 cannot be built without it */
+void alloc_dom0(void)
+{
+#ifdef CONFIG_DOMAIN0_CONTIGUOUS
+ if (platform_is_hp_ski()) {
+ dom0_size = 128*1024*1024; //FIXME: Should be configurable
+ }
+ printf("alloc_dom0: starting (initializing %d MB...)\n",dom0_size/(1024*1024));
+ dom0_start = __alloc_bootmem(dom0_size,dom0_align,__pa(MAX_DMA_ADDRESS));
+ if (!dom0_start) {
+ printf("construct_dom0: can't allocate contiguous memory size=%p\n",
+ dom0_size);
+ while(1);
+ }
+ printf("alloc_dom0: dom0_start=%p\n",dom0_start);
+#else
+ dom0_start = 0;
+#endif
+
+}
+
+/*
+ * construct_dom0: build the initial domain.  Parses the dom0 ELF image
+ * (taken from the boot-param initrd slot, not the image_start argument),
+ * sets up the domain's mm/pgd, loads the image, and creates the first
+ * execution context via new_thread().  Returns 0 on success or a
+ * negative errno.  Much x86-inherited start_info/initrd code is still
+ * stubbed out (#if 0) at this stage of the port.
+ */
+int construct_dom0(struct domain *d, 
+	               unsigned long alloc_start,
+                       unsigned long alloc_end,
+                       unsigned long image_start, unsigned long image_len, 
+                       unsigned long initrd_start, unsigned long initrd_len,
+                       char *cmdline)
+{
+ char *dst;
+ int i, rc;
+ unsigned long pfn, mfn;
+ unsigned long nr_pt_pages;
+ unsigned long count;
+ //l2_pgentry_t *l2tab, *l2start;
+ //l1_pgentry_t *l1tab = NULL, *l1start = NULL;
+ struct pfn_info *page = NULL;	/* NOTE(review): several locals above are unused x86 leftovers */
+ start_info_t *si;
+ struct exec_domain *ed = d->exec_domain[0];
+
+ struct domain_setup_info dsi;
+ unsigned long p_start;
+ unsigned long pkern_start;
+ unsigned long pkern_entry;
+ unsigned long pkern_end;
+
+ extern void physdev_init_dom0(struct domain *);
+
+//printf("construct_dom0: starting\n");
+ /* Sanity! */
+#ifndef CLONE_DOMAIN0
+ if ( d != dom0 ) 
+ BUG();
+ if ( test_bit(DF_CONSTRUCTED, &d->d_flags) ) 
+ BUG();
+#endif
+
+ memset(&dsi, 0, sizeof(struct domain_setup_info));
+
+ printk("*** LOADING DOMAIN 0 ***\n");
+
+ d->max_pages = dom0_size/PAGE_SIZE;
+ /* the dom0 kernel image arrives in the boot-param initrd slot;
+ the image_start/image_len arguments are overridden here */
+ image_start = __va(ia64_boot_param->initrd_start);
+ image_len = ia64_boot_param->initrd_size;
+//printk("image_start=%lx, image_len=%lx\n",image_start,image_len);
+//printk("First word of image: %lx\n",*(unsigned long *)image_start);
+
+//printf("construct_dom0: about to call parseelfimage\n");
+ rc = parseelfimage(image_start, image_len, &dsi);
+ if ( rc != 0 )
+ return rc;
+
+ p_start = dsi.v_start;
+ pkern_start = dsi.v_kernstart;
+ pkern_end = dsi.v_kernend;
+ pkern_entry = dsi.v_kernentry;
+
+//printk("p_start=%lx, pkern_start=%lx, pkern_end=%lx, pkern_entry=%lx\n",p_start,pkern_start,pkern_end,pkern_entry);
+
+ if ( (p_start & (PAGE_SIZE-1)) != 0 )
+ {
+ printk("Initial guest OS must load to a page boundary.\n");
+ return -EINVAL;
+ }
+
+ printk("METAPHYSICAL MEMORY ARRANGEMENT:\n"
+ " Kernel image: %lx->%lx\n"
+ " Entry address: %lx\n"
+ " Init. ramdisk: (NOT IMPLEMENTED YET)\n",
+ pkern_start, pkern_end, pkern_entry);
+
+ if ( (pkern_end - pkern_start) > (d->max_pages * PAGE_SIZE) )
+ {
+ printk("Initial guest OS requires too much space\n"
+ "(%luMB is greater than %luMB limit)\n",
+ (pkern_end-pkern_start)>>20, (d->max_pages<<PAGE_SHIFT)>>20);
+ return -ENOMEM;
+ }
+
+ // if high 3 bits of pkern start are non-zero, error
+
+ // if pkern end is after end of metaphysical memory, error
+ // (we should be able to deal with this... later)
+
+
+ //
+
+#if 0
+ strcpy(d->name,"Domain0");
+#endif
+ /* Set up shared-info area. */
+ update_dom_time(d);
+ d->shared_info->domain_time = 0;
+
+ // prepare domain0 pagetable (maps METAphysical to physical)
+ // following is roughly mm_init() in linux/kernel/fork.c
+ d->arch.mm = kmem_cache_alloc(mm_cachep, SLAB_KERNEL);
+ if (unlikely(!d->arch.mm)) {
+ printk("Can't allocate mm_struct for domain0\n");
+ return -ENOMEM;
+ }
+ memset(d->arch.mm, 0, sizeof(*d->arch.mm));
+ d->arch.mm->pgd = pgd_alloc(d->arch.mm);
+ if (unlikely(!d->arch.mm->pgd)) {
+ printk("Can't allocate pgd for domain0\n");
+ return -ENOMEM;
+ }
+
+
+ /* Mask all upcalls... */
+ for ( i = 0; i < MAX_VIRT_CPUS; i++ )
+ d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
+
+ /* Copy the OS image. */
+ //(void)loadelfimage(image_start);
+ loaddomainelfimage(d,image_start);
+
+ /* Copy the initial ramdisk. */
+ //if ( initrd_len != 0 )
+ // memcpy((void *)vinitrd_start, initrd_start, initrd_len);
+
+#if 0
+ /* Set up start info area. */
+ //si = (start_info_t *)vstartinfo_start;
+ memset(si, 0, PAGE_SIZE);
+ si->nr_pages = d->tot_pages;
+ si->shared_info = virt_to_phys(d->shared_info);
+ si->flags = SIF_PRIVILEGED | SIF_INITDOMAIN;
+ //si->pt_base = vpt_start;
+ //si->nr_pt_frames = nr_pt_pages;
+ //si->mfn_list = vphysmap_start;
+
+ if ( initrd_len != 0 )
+ {
+ //si->mod_start = vinitrd_start;
+ si->mod_len = initrd_len;
+ printk("Initrd len 0x%lx, start at 0x%08lx\n",
+ si->mod_len, si->mod_start);
+ }
+
+ dst = si->cmd_line;
+ if ( cmdline != NULL )
+ {
+ for ( i = 0; i < 255; i++ )
+ {
+ if ( cmdline[i] == '\0' )
+ break;
+ *dst++ = cmdline[i];
+ }
+ }
+ *dst = '\0';
+
+ zap_low_mappings(); /* Do the same for the idle page tables. */
+#endif
+ 
+ /* Give up the VGA console if DOM0 is configured to grab it. */
+#ifdef IA64
+ if (cmdline != NULL)	/* guard avoids strstr() on a NULL cmdline */
+#endif
+ console_endboot(strstr(cmdline, "tty0") != NULL);
+
+ /* DOM0 gets access to everything. */
+#ifdef CLONE_DOMAIN0
+if (d == dom0)
+#endif
+ physdev_init_dom0(d);
+
+ set_bit(DF_CONSTRUCTED, &d->d_flags);
+
+ new_thread(ed, pkern_entry, 0, 0);
+ // FIXME: Hack for keyboard input
+#ifdef CLONE_DOMAIN0
+if (d == dom0)
+#endif
+ serial_input_init();
+ /* dom0 may be delivered every interrupt vector; cloned domains get
+ only vector 0x30 (presumably the keyboard hack above -- confirm) */
+ if (d == dom0) {
+ ed->vcpu_info->arch.delivery_mask[0] = -1L;
+ ed->vcpu_info->arch.delivery_mask[1] = -1L;
+ ed->vcpu_info->arch.delivery_mask[2] = -1L;
+ ed->vcpu_info->arch.delivery_mask[3] = -1L;
+ }
+ else __set_bit(0x30,ed->vcpu_info->arch.delivery_mask);
+
+ return 0;
+}
+
+/* restart is not implemented yet: break to the SKI debugger if on the
+ simulator, then spin forever */
+void machine_restart(char * __unused)
+{
+ if (platform_is_hp_ski()) dummy();
+ printf("machine_restart called: spinning....\n");
+ while(1);
+}
+
+/* halt is not implemented yet: break to the SKI debugger if on the
+ simulator, then spin forever */
+void machine_halt(void)
+{
+ if (platform_is_hp_ski()) dummy();
+ printf("machine_halt called: spinning....\n");
+ while(1);
+}
+
+/* debug trap: "break 0" drops into the SKI simulator debugger;
+ on real hardware this just spins */
+void dummy(void)
+{
+ if (platform_is_hp_ski()) asm("break 0;;");
+ printf("dummy called: spinning....\n");
+ while(1);
+}
+
+
+/* context-switch wrapper around the ia64 __switch_to macro;
+ 'last' receives the previously-running context and is discarded here */
+void switch_to(struct exec_domain *prev, struct exec_domain *next)
+{
+ struct exec_domain *last;
+
+ __switch_to(prev,next,last);
+ //set_current(next);
+}
+
+/* forward a (keyboard) interrupt vector to dom0's first vcpu */
+void domain_pend_keyboard_interrupt(int irq)
+{
+ vcpu_pend_interrupt(dom0->exec_domain[0],irq);
+}
--- /dev/null
+#include <xen/config.h>
+#include <xen/sched.h>
+#include <asm/desc.h>
+
+/* static initializer for the idle domain's mm_struct */
+#define INIT_MM(name) \
+{ \
+ .pgd = swapper_pg_dir, \
+ .mm_users = ATOMIC_INIT(2), \
+ .mm_count = ATOMIC_INIT(1), \
+ .page_table_lock = SPIN_LOCK_UNLOCKED, \
+ .mmlist = LIST_HEAD_INIT(name.mmlist), \
+}
+
+/* static initializer for the idle exec_domain (GNU old-style
+ designated initializers, matching the surrounding code) */
+#define IDLE0_EXEC_DOMAIN(_ed,_d) \
+{ \
+ processor: 0, \
+ mm: 0, \
+ thread: INIT_THREAD, \
+ domain: (_d) \
+}
+
+/* static initializer for the idle domain itself */
+#define IDLE0_DOMAIN(_t) \
+{ \
+ id: IDLE_DOMAIN_ID, \
+ d_flags: 1<<DF_IDLETASK, \
+ refcnt: ATOMIC_INIT(1) \
+}
+
+/* the idle mm and idle domain objects themselves */
+struct mm_struct init_mm = INIT_MM(init_mm);
+EXPORT_SYMBOL(init_mm);
+
+struct domain idle0_domain = IDLE0_DOMAIN(idle0_domain);
+#if 0
+struct exec_domain idle0_exec_domain = IDLE0_EXEC_DOMAIN(idle0_exec_domain,
+	 &idle0_domain);
+#endif
+
+
+/*
+ * Initial task structure.
+ *
+ * We need to make sure that this is properly aligned due to the way process stacks are
+ * handled. This is done by having a special ".data.init_task" section...
+ */
+union {
+ struct {
+ struct domain task;
+ } s;
+ unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
+} init_task_mem asm ("init_task") __attribute__((section(".data.init_task")));
+// = {{
+	;	/* NOTE(review): stray semicolon left over from the commented-out initializer */
+//.task = IDLE0_EXEC_DOMAIN(init_task_mem.s.task,&idle0_domain),
+//};
+//};
+
+EXPORT_SYMBOL(init_task);
+
--- /dev/null
+/*
+ * linux/arch/ia64/kernel/irq.c
+ *
+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
+ *
+ * This file contains the code used by various IRQ handling routines:
+ * asking for different IRQ's should be done through these routines
+ * instead of just grabbing them. Thus setups with different IRQ numbers
+ * shouldn't result in any weird surprises, and installing new handlers
+ * should be easier.
+ *
+ * Copyright (C) Ashok Raj<ashok.raj@intel.com>, Intel Corporation 2004
+ *
+ * 4/14/2004: Added code to handle cpu migration and do safe irq
+ * migration without lossing interrupts for iosapic
+ * architecture.
+ */
+
+/*
+ * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
+ *
+ * IRQs are in fact implemented a bit like signal handlers for the kernel.
+ * Naturally it's not a 1:1 relation, but there are similarities.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#ifndef XEN
+#include <linux/signal.h>
+#endif
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/slab.h>
+#ifndef XEN
+#include <linux/random.h>
+#include <linux/cpu.h>
+#endif
+#include <linux/ctype.h>
+#ifndef XEN
+#include <linux/smp_lock.h>
+#endif
+#include <linux/init.h>
+#ifndef XEN
+#include <linux/kernel_stat.h>
+#endif
+#include <linux/irq.h>
+#ifndef XEN
+#include <linux/proc_fs.h>
+#endif
+#include <linux/seq_file.h>
+#ifndef XEN
+#include <linux/kallsyms.h>
+#include <linux/notifier.h>
+#endif
+
+#include <asm/atomic.h>
+#ifndef XEN
+#include <asm/cpu.h>
+#endif
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+#ifndef XEN
+#include <asm/tlbflush.h>
+#endif
+#include <asm/delay.h>
+#include <asm/irq.h>
+
+#ifdef XEN
+#include <xen/event.h>
+#define _irq_desc irq_desc
+#define irq_descp(irq) &irq_desc[irq]
+#define apicid_to_phys_cpu_present(x) 1
+#endif
+
+
+/*
+ * Linux has a controller-independent x86 interrupt architecture.
+ * every controller has a 'controller-template', that is used
+ * by the main code to do the right thing. Each driver-visible
+ * interrupt source is transparently wired to the appropriate
+ * controller. Thus drivers need not be aware of the
+ * interrupt-controller.
+ *
+ * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
+ * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
+ * (IO-APICs assumed to be messaging to Pentium local-APICs)
+ *
+ * the code is designed to be easily extended with new/different
+ * interrupt controllers, without having to do assembly magic.
+ */
+
+/*
+ * Controller mappings for all interrupt sources:
+ */
+irq_desc_t _irq_desc[NR_IRQS] __cacheline_aligned = {
+ [0 ... NR_IRQS-1] = {
+ .status = IRQ_DISABLED,
+ .handler = &no_irq_type,	/* every irq starts with the no-op controller */
+ .lock = SPIN_LOCK_UNLOCKED
+ }
+};
+
+/*
+ * This is updated when the user sets irq affinity via /proc
+ */
+cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
+
+#ifdef CONFIG_IA64_GENERIC
+/* generic-machvec fallbacks: irq number and vector are identity-mapped */
+irq_desc_t * __ia64_irq_desc (unsigned int irq)
+{
+ return _irq_desc + irq;
+}
+
+ia64_vector __ia64_irq_to_vector (unsigned int irq)
+{
+ return (ia64_vector) irq;
+}
+
+unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
+{
+ return (unsigned int) vec;
+}
+#endif
+
+static void register_irq_proc (unsigned int irq);
+
+/*
+ * Special irq handlers.
+ */
+
+#ifdef XEN
+/* placeholder handler; Xen handlers return void, Linux's return irqreturn_t */
+void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
+#else
+irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
+{ return IRQ_NONE; }
+#endif
+
+/*
+ * Generic no controller code
+ */
+
+/* no-op controller operations backing no_irq_type below */
+static void enable_none(unsigned int irq) { }
+static unsigned int startup_none(unsigned int irq) { return 0; }
+static void disable_none(unsigned int irq) { }
+static void ack_none(unsigned int irq)
+{
+/*
+ * 'what should we do if we get a hw irq event on an illegal vector'.
+ * each architecture has to answer this themselves, it doesn't deserve
+ * a generic callback i think.
+ */
+#ifdef CONFIG_X86
+ printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
+#ifdef CONFIG_X86_LOCAL_APIC
+ /*
+ * Currently unexpected vectors happen only on SMP and APIC.
+ * We _must_ ack these because every local APIC has only N
+ * irq slots per priority level, and a 'hanging, unacked' IRQ
+ * holds up an irq slot - in excessive cases (when multiple
+ * unexpected vectors occur) that might lock up the APIC
+ * completely.
+ */
+ ack_APIC_irq();
+#endif
+#endif
+#ifdef CONFIG_IA64
+ printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
+#endif
+}
+
+/* startup is the same as "enable", shutdown is same as "disable" */
+#define shutdown_none	disable_none
+#define end_none	enable_none
+
+/* default controller for unassigned irq lines (positional initializers) */
+struct hw_interrupt_type no_irq_type = {
+ "none",
+ startup_none,
+ shutdown_none,
+ enable_none,
+ disable_none,
+ ack_none,
+ end_none
+};
+
+atomic_t irq_err_count;	/* count of spurious/unhandled interrupts */
+#ifdef CONFIG_X86_IO_APIC
+#ifdef APIC_MISMATCH_DEBUG
+atomic_t irq_mis_count;
+#endif
+#endif
+
+/*
+ * Generic, controller-independent functions:
+ */
+
+#ifndef XEN
+/* /proc/interrupts reporting -- compiled out entirely under Xen */
+int show_interrupts(struct seq_file *p, void *v)
+{
+ int j, i = *(loff_t *) v;
+ struct irqaction * action;
+ irq_desc_t *idesc;
+ unsigned long flags;
+
+ if (i == 0) {
+ seq_puts(p, " ");
+ for (j=0; j<NR_CPUS; j++)
+ if (cpu_online(j))
+ seq_printf(p, "CPU%d ",j);
+ seq_putc(p, '\n');
+ }
+
+ if (i < NR_IRQS) {
+ idesc = irq_descp(i);
+ spin_lock_irqsave(&idesc->lock, flags);
+ action = idesc->action;
+ if (!action)
+ goto skip;
+ seq_printf(p, "%3d: ",i);
+#ifndef CONFIG_SMP
+ seq_printf(p, "%10u ", kstat_irqs(i));
+#else
+ for (j = 0; j < NR_CPUS; j++)
+ if (cpu_online(j))
+ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+#endif
+ seq_printf(p, " %14s", idesc->handler->typename);
+ seq_printf(p, " %s", action->name);
+
+ for (action=action->next; action; action = action->next)
+ seq_printf(p, ", %s", action->name);
+
+ seq_putc(p, '\n');
+skip:
+ spin_unlock_irqrestore(&idesc->lock, flags);
+ } else if (i == NR_IRQS) {
+ seq_puts(p, "NMI: ");
+ for (j = 0; j < NR_CPUS; j++)
+ if (cpu_online(j))
+ seq_printf(p, "%10u ", nmi_count(j));
+ seq_putc(p, '\n');
+#ifdef CONFIG_X86_LOCAL_APIC
+ seq_puts(p, "LOC: ");
+ for (j = 0; j < NR_CPUS; j++)
+ if (cpu_online(j))
+ seq_printf(p, "%10u ", irq_stat[j].apic_timer_irqs);
+ seq_putc(p, '\n');
+#endif
+ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
+#ifdef CONFIG_X86_IO_APIC
+#ifdef APIC_MISMATCH_DEBUG
+ seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
+#endif
+#endif
+ }
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_SMP
+/* busy-wait until no CPU is executing a handler for this irq */
+inline void synchronize_irq(unsigned int irq)
+{
+ while (irq_descp(irq)->status & IRQ_INPROGRESS)
+ cpu_relax();
+}
+EXPORT_SYMBOL(synchronize_irq);
+#endif
+
+/*
+ * This should really return information about whether
+ * we should do bottom half handling etc. Right now we
+ * end up _always_ checking the bottom half, which is a
+ * waste of time and is not what some drivers would
+ * prefer.
+ */
+int handle_IRQ_event(unsigned int irq,
+	 struct pt_regs *regs, struct irqaction *action)
+{
+ int status = 1;	/* Force the "do bottom halves" bit */
+ int retval = 0;
+
+#ifndef XEN
+ if (!(action->flags & SA_INTERRUPT))
+#endif
+ /* XEN: handlers always run with interrupts enabled */
+ local_irq_enable();
+
+#ifdef XEN
+ /* XEN: single handler per irq, no shared-action chain */
+ action->handler(irq, action->dev_id, regs);
+#else
+ do {
+ status |= action->flags;
+ retval |= action->handler(irq, action->dev_id, regs);
+ action = action->next;
+ } while (action);
+ if (status & SA_SAMPLE_RANDOM)
+ add_interrupt_randomness(irq);
+#endif
+ local_irq_disable();
+ return retval;	/* XEN: retval stays 0 (handlers return void) */
+}
+
+#ifndef XEN
+/* spurious-irq diagnostics -- compiled out under Xen */
+static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
+{
+ struct irqaction *action;
+
+ if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
+ printk(KERN_ERR "irq event %d: bogus return value %x\n",
+ irq, action_ret);
+ } else {
+ printk(KERN_ERR "irq %d: nobody cared!\n", irq);
+ }
+ dump_stack();
+ printk(KERN_ERR "handlers:\n");
+ action = desc->action;
+ do {
+ printk(KERN_ERR "[<%p>]", action->handler);
+ print_symbol(" (%s)",
+ (unsigned long)action->handler);
+ printk("\n");
+ action = action->next;
+ } while (action);
+}
+
+static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
+{
+ static int count = 100;	/* rate-limit: report at most 100 times */
+
+ if (count) {
+ count--;
+ __report_bad_irq(irq, desc, action_ret);
+ }
+}
+#endif
+
+static int noirqdebug;	/* set via "noirqdebug" boot option */
+
+static int __init noirqdebug_setup(char *str)
+{
+ noirqdebug = 1;
+ printk("IRQ lockup detection disabled\n");
+ return 1;
+}
+
+__setup("noirqdebug", noirqdebug_setup);
+
+/*
+ * If 99,900 of the previous 100,000 interrupts have not been handled then
+ * assume that the IRQ is stuck in some manner. Drop a diagnostic and try to
+ * turn the IRQ off.
+ *
+ * (The other 100-of-100,000 interrupts may have been a correctly-functioning
+ * device sharing an IRQ with the failing one)
+ *
+ * Called under desc->lock
+ */
+#ifndef XEN
+/* stuck-irq bookkeeping -- compiled out under Xen */
+static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
+{
+ if (action_ret != IRQ_HANDLED) {
+ desc->irqs_unhandled++;
+ if (action_ret != IRQ_NONE)
+ report_bad_irq(irq, desc, action_ret);
+ }
+
+ desc->irq_count++;
+ if (desc->irq_count < 100000)
+ return;
+
+ desc->irq_count = 0;
+ if (desc->irqs_unhandled > 99900) {
+ /*
+ * The interrupt is stuck
+ */
+ __report_bad_irq(irq, desc, action_ret);
+ /*
+ * Now kill the IRQ
+ */
+ printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
+ desc->status |= IRQ_DISABLED;
+ desc->handler->disable(irq);
+ }
+ desc->irqs_unhandled = 0;
+}
+#endif
+
+/*
+ * Generic enable/disable code: this just calls
+ * down into the PIC-specific version for the actual
+ * hardware disable after having gotten the irq
+ * controller lock.
+ */
+
+/**
+ * disable_irq_nosync - disable an irq without waiting
+ * @irq: Interrupt to disable
+ *
+ * Disable the selected interrupt line. Disables and Enables are
+ * nested.
+ * Unlike disable_irq(), this function does not ensure existing
+ * instances of the IRQ handler have completed before returning.
+ *
+ * This function may be called from IRQ context.
+ */
+
+inline void disable_irq_nosync(unsigned int irq)
+{
+ irq_desc_t *desc = irq_descp(irq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ if (!desc->depth++) {	/* only the first disable touches the hardware */
+ desc->status |= IRQ_DISABLED;
+ desc->handler->disable(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+EXPORT_SYMBOL(disable_irq_nosync);
+
+/**
+ * disable_irq - disable an irq and wait for completion
+ * @irq: Interrupt to disable
+ *
+ * Disable the selected interrupt line. Enables and Disables are
+ * nested.
+ * This function waits for any pending IRQ handlers for this interrupt
+ * to complete before returning. If you use this function while
+ * holding a resource the IRQ handler may need you will deadlock.
+ *
+ * This function may be called - with care - from IRQ context.
+ */
+
+void disable_irq(unsigned int irq)
+{
+ irq_desc_t *desc = irq_descp(irq);
+
+ disable_irq_nosync(irq);
+ if (desc->action)	/* only wait if a handler could be running */
+ synchronize_irq(irq);
+}
+EXPORT_SYMBOL(disable_irq);
+
+/**
+ * enable_irq - enable handling of an irq
+ * @irq: Interrupt to enable
+ *
+ * Undoes the effect of one call to disable_irq(). If this
+ * matches the last disable, processing of interrupts on this
+ * IRQ line is re-enabled.
+ *
+ * This function may be called from IRQ context.
+ */
+
+void enable_irq(unsigned int irq)
+{
+ irq_desc_t *desc = irq_descp(irq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ /* note the unusual case ordering: 1 re-enables and falls through to
+ the depth decrement in default; 0 is an unbalanced-enable error */
+ switch (desc->depth) {
+ case 1: {
+ unsigned int status = desc->status & ~IRQ_DISABLED;
+ desc->status = status;
+#ifndef XEN
+ if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+ desc->status = status | IRQ_REPLAY;
+ hw_resend_irq(desc->handler,irq);
+ }
+#endif
+ desc->handler->enable(irq);
+ /* fall-through */
+ }
+ default:
+ desc->depth--;
+ break;
+ case 0:
+ printk(KERN_ERR "enable_irq(%u) unbalanced from %p\n",
+ irq, (void *) __builtin_return_address(0));
+ }
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+EXPORT_SYMBOL(enable_irq);
+
+/*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
+{
+ /*
+ * We ack quickly, we don't want the irq controller
+ * thinking we're snobs just because some other CPU has
+ * disabled global interrupts (we have already done the
+ * INT_ACK cycles, it's too late to try to pretend to the
+ * controller that we aren't taking the interrupt).
+ *
+ * 0 return value means that this irq is already being
+ * handled by some other CPU. (or is disabled)
+ */
+ irq_desc_t *desc = irq_descp(irq);
+ struct irqaction * action;
+ irqreturn_t action_ret;
+ unsigned int status;
+ int cpu;
+
+ cpu = smp_processor_id(); /* for CONFIG_PREEMPT, this must come after irq_enter()! */
+
+#ifndef XEN
+ kstat_cpu(cpu).irqs[irq]++;
+#endif
+
+ if (desc->status & IRQ_PER_CPU) {
+ /* no locking required for CPU-local interrupts: */
+ desc->handler->ack(irq);
+ action_ret = handle_IRQ_event(irq, regs, desc->action);
+ desc->handler->end(irq);
+ } else {
+ spin_lock(&desc->lock);
+ desc->handler->ack(irq);
+ /*
+ * REPLAY is when Linux resends an IRQ that was dropped earlier
+ * WAITING is used by probe to mark irqs that are being tested
+ */
+#ifdef XEN
+ /* XEN: IRQ_WAITING is kept -- no autoprobe under Xen */
+ status = desc->status & ~IRQ_REPLAY;
+#else
+ status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
+#endif
+ status |= IRQ_PENDING; /* we _want_ to handle it */
+
+ /*
+ * If the IRQ is disabled for whatever reason, we cannot
+ * use the action we have.
+ */
+ action = NULL;
+ if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
+ action = desc->action;
+ status &= ~IRQ_PENDING; /* we commit to handling */
+ status |= IRQ_INPROGRESS; /* we are handling it */
+ }
+ desc->status = status;
+
+ /*
+ * If there is no IRQ handler or it was disabled, exit early.
+ * Since we set PENDING, if another processor is handling
+ * a different instance of this same irq, the other processor
+ * will take care of it.
+ */
+ if (unlikely(!action))
+ goto out;
+
+ /*
+ * Edge triggered interrupts need to remember
+ * pending events.
+ * This applies to any hw interrupts that allow a second
+ * instance of the same irq to arrive while we are in do_IRQ
+ * or in the handler. But the code here only handles the _second_
+ * instance of the irq, not the third or fourth. So it is mostly
+ * useful for irq hardware that does not mask cleanly in an
+ * SMP environment.
+ */
+ for (;;) {
+ spin_unlock(&desc->lock);
+ action_ret = handle_IRQ_event(irq, regs, action);
+ spin_lock(&desc->lock);
+#ifndef XEN
+ if (!noirqdebug)
+ note_interrupt(irq, desc, action_ret);
+#endif
+ if (!(desc->status & IRQ_PENDING))
+ break;
+ desc->status &= ~IRQ_PENDING;
+ }
+ desc->status &= ~IRQ_INPROGRESS;
+ out:
+ /*
+ * The ->end() handler has to deal with interrupts which got
+ * disabled while the handler was running.
+ */
+ desc->handler->end(irq);
+ spin_unlock(&desc->lock);
+ }
+ return 1;
+}
+
+/**
+ * request_irq - allocate an interrupt line
+ * @irq: Interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs
+ * @irqflags: Interrupt type flags
+ * @devname: An ascii name for the claiming device
+ * @dev_id: A cookie passed back to the handler function
+ *
+ * This call allocates interrupt resources and enables the
+ * interrupt line and IRQ handling. From the point this
+ * call is made your handler function may be invoked. Since
+ * your handler function must clear any interrupt the board
+ * raises, you must take care both to initialise your hardware
+ * and to set up the interrupt handler in the right order.
+ *
+ * Dev_id must be globally unique. Normally the address of the
+ * device data structure is used as the cookie. Since the handler
+ * receives this value it makes sense to use it.
+ *
+ * If your interrupt is shared you must pass a non NULL dev_id
+ * as this is required when freeing the interrupt.
+ *
+ * Flags:
+ *
+ * SA_SHIRQ Interrupt is shared
+ *
+ * SA_INTERRUPT Disable local interrupts while processing
+ *
+ * SA_SAMPLE_RANDOM The interrupt can be used for entropy
+ *
+ */
+
+int request_irq(unsigned int irq,
+	 irqreturn_t (*handler)(int, void *, struct pt_regs *),
+	 unsigned long irqflags,
+	 const char * devname,
+	 void *dev_id)
+{
+ int retval;
+ struct irqaction * action;
+
+#if 1
+ /*
+ * Sanity-check: shared interrupts should REALLY pass in
+ * a real dev-ID, otherwise we'll have trouble later trying
+ * to figure out which interrupt is which (messes up the
+ * interrupt freeing logic etc).
+ */
+ if (irqflags & SA_SHIRQ) {
+ if (!dev_id)
+ printk(KERN_ERR "Bad boy: %s called us without a dev_id!\n", devname);
+ }
+#endif
+
+ if (irq >= NR_IRQS)
+ return -EINVAL;
+ if (!handler)
+ return -EINVAL;
+
+ action = (struct irqaction *)
+ kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+ if (!action)
+ return -ENOMEM;
+
+ /* XEN: the trimmed irqaction has no flags/mask/next fields */
+ action->handler = handler;
+#ifndef XEN
+ action->flags = irqflags;
+ action->mask = 0;
+#endif
+ action->name = devname;
+#ifndef XEN
+ action->next = NULL;
+#endif
+ action->dev_id = dev_id;
+
+ retval = setup_irq(irq, action);
+ if (retval)
+ kfree(action);	/* setup failed: don't leak the action */
+ return retval;
+}
+
+EXPORT_SYMBOL(request_irq);
+
+/**
+ * free_irq - free an interrupt
+ * @irq: Interrupt line to free
+ * @dev_id: Device identity to free
+ *
+ * Remove an interrupt handler. The handler is removed and if the
+ * interrupt line is no longer in use by any driver it is disabled.
+ * On a shared IRQ the caller must ensure the interrupt is disabled
+ * on the card it drives before calling this function. The function
+ * does not return until any executing interrupts for this IRQ
+ * have completed.
+ *
+ * This function must not be called from interrupt context.
+ */
+
+#ifdef XEN
+/* XEN variant takes no dev_id: exactly one action per irq, so the
+ shared-line search below is compiled out */
+void free_irq(unsigned int irq)
+#else
+void free_irq(unsigned int irq, void *dev_id)
+#endif
+{
+ irq_desc_t *desc;
+ struct irqaction **p;
+ unsigned long flags;
+
+ if (irq >= NR_IRQS)
+ return;
+
+ desc = irq_descp(irq);
+ spin_lock_irqsave(&desc->lock,flags);
+#ifdef XEN
+ if (desc->action) {
+ struct irqaction * action = desc->action;
+ desc->action = NULL;
+#else
+ p = &desc->action;
+ for (;;) {
+ struct irqaction * action = *p;
+ if (action) {
+ struct irqaction **pp = p;
+ p = &action->next;
+ if (action->dev_id != dev_id)
+ continue;
+
+ /* Found it - now remove it from the list of entries */
+ *pp = action->next;
+ if (!desc->action) {
+#endif
+ desc->status |= IRQ_DISABLED;
+ desc->handler->shutdown(irq);
+#ifndef XEN
+ }
+#endif
+ spin_unlock_irqrestore(&desc->lock,flags);
+
+ /* Wait to make sure it's not being used on another CPU */
+ synchronize_irq(irq);
+ kfree(action);
+ return;
+ }
+ printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
+ spin_unlock_irqrestore(&desc->lock,flags);
+#ifndef XEN
+ return;
+ }
+#endif
+}
+
+EXPORT_SYMBOL(free_irq);
+
+/*
+ * IRQ autodetection code..
+ *
+ * This depends on the fact that any interrupt that
+ * comes in on to an unassigned handler will get stuck
+ * with "IRQ_WAITING" cleared and the interrupt
+ * disabled.
+ */
+
+static DECLARE_MUTEX(probe_sem);
+
+/**
+ * probe_irq_on - begin an interrupt autodetect
+ *
+ * Commence probing for an interrupt. The interrupts are scanned
+ * and a mask of potential interrupt lines is returned.
+ *
+ */
+
+#ifndef XEN
+/* irq autoprobe -- the whole block is compiled out under Xen */
+unsigned long probe_irq_on(void)
+{
+ unsigned int i;
+ irq_desc_t *desc;
+ unsigned long val;
+ unsigned long delay;
+
+ down(&probe_sem);
+ /*
+ * something may have generated an irq long ago and we want to
+ * flush such a longstanding irq before considering it as spurious.
+ */
+ for (i = NR_IRQS-1; i > 0; i--) {
+ desc = irq_descp(i);
+
+ spin_lock_irq(&desc->lock);
+ if (!desc->action)
+ desc->handler->startup(i);
+ spin_unlock_irq(&desc->lock);
+ }
+
+ /* Wait for longstanding interrupts to trigger. */
+ for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
+ /* about 20ms delay */ barrier();
+
+ /*
+ * enable any unassigned irqs
+ * (we must startup again here because if a longstanding irq
+ * happened in the previous stage, it may have masked itself)
+ */
+ for (i = NR_IRQS-1; i > 0; i--) {
+ desc = irq_descp(i);
+
+ spin_lock_irq(&desc->lock);
+ if (!desc->action) {
+ desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
+ if (desc->handler->startup(i))
+ desc->status |= IRQ_PENDING;
+ }
+ spin_unlock_irq(&desc->lock);
+ }
+
+ /*
+ * Wait for spurious interrupts to trigger
+ */
+ for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
+ /* about 100ms delay */ barrier();
+
+ /*
+ * Now filter out any obviously spurious interrupts
+ */
+ val = 0;
+ for (i = 0; i < NR_IRQS; i++) {
+ irq_desc_t *desc = irq_descp(i);
+ unsigned int status;
+
+ spin_lock_irq(&desc->lock);
+ status = desc->status;
+
+ if (status & IRQ_AUTODETECT) {
+ /* It triggered already - consider it spurious. */
+ if (!(status & IRQ_WAITING)) {
+ desc->status = status & ~IRQ_AUTODETECT;
+ desc->handler->shutdown(i);
+ } else
+ if (i < 32)
+ val |= 1 << i;
+ }
+ spin_unlock_irq(&desc->lock);
+ }
+
+ return val;
+}
+
+EXPORT_SYMBOL(probe_irq_on);
+
+/**
+ *	probe_irq_mask - scan a bitmap of interrupt lines
+ *	@val:	mask of interrupts to consider
+ *
+ *	Scan the ISA bus interrupt lines and return a bitmap of
+ *	active interrupts. The interrupt probe logic state is then
+ *	returned to its previous value.
+ *
+ *	Note: we need to scan all the irq's even though we will
+ *	only return ISA irq numbers - just so that we reset them
+ *	all to a known state.
+ */
+unsigned int probe_irq_mask(unsigned long val)
+{
+	int i;
+	unsigned int mask;
+
+	mask = 0;
+	/* Only the 16 ISA lines are reported, but every line that was
+	 * armed by probe_irq_on() is shut down again below. */
+	for (i = 0; i < 16; i++) {
+		irq_desc_t *desc = irq_descp(i);
+		unsigned int status;
+
+		spin_lock_irq(&desc->lock);
+		status = desc->status;
+
+		if (status & IRQ_AUTODETECT) {
+			/* IRQ_WAITING cleared means the line fired. */
+			if (!(status & IRQ_WAITING))
+				mask |= 1 << i;
+
+			desc->status = status & ~IRQ_AUTODETECT;
+			desc->handler->shutdown(i);
+		}
+		spin_unlock_irq(&desc->lock);
+	}
+	up(&probe_sem);	/* pairs with down() in probe_irq_on() */
+
+	return mask & val;
+}
+EXPORT_SYMBOL(probe_irq_mask);
+
+/**
+ *	probe_irq_off	-	end an interrupt autodetect
+ *	@val: mask of potential interrupts (unused)
+ *
+ *	Scans the unused interrupt lines and returns the line which
+ *	appears to have triggered the interrupt. If no interrupt was
+ *	found then zero is returned. If more than one interrupt is
+ *	found then minus the first candidate is returned to indicate
+ *	there is doubt.
+ *
+ *	The interrupt probe logic state is returned to its previous
+ *	value.
+ *
+ *	BUGS: When used in a module (which arguably shouldn't happen)
+ *	nothing prevents two IRQ probe callers from overlapping. The
+ *	results of this are non-optimal.
+ */
+
+int probe_irq_off(unsigned long val)
+{
+	int i, irq_found, nr_irqs;
+
+	nr_irqs = 0;
+	irq_found = 0;
+	for (i = 0; i < NR_IRQS; i++) {
+		irq_desc_t *desc = irq_descp(i);
+		unsigned int status;
+
+		spin_lock_irq(&desc->lock);
+		status = desc->status;
+
+		if (status & IRQ_AUTODETECT) {
+			/* A cleared IRQ_WAITING flag means this line fired
+			 * during the probe window. */
+			if (!(status & IRQ_WAITING)) {
+				if (!nr_irqs)
+					irq_found = i;
+				nr_irqs++;
+			}
+			desc->status = status & ~IRQ_AUTODETECT;
+			desc->handler->shutdown(i);
+		}
+		spin_unlock_irq(&desc->lock);
+	}
+	up(&probe_sem);	/* pairs with down() in probe_irq_on() */
+
+	/* Multiple candidates: return the first one, negated, to flag doubt. */
+	if (nr_irqs > 1)
+		irq_found = -irq_found;
+	return irq_found;
+}
+
+EXPORT_SYMBOL(probe_irq_off);
+#endif
+#endif
+
+/*
+ * Attach @new to @irq and start the line.  Returns 0 on success,
+ * -EBUSY if the IRQ is already in use (Xen never shares lines, so any
+ * existing action is a conflict), or -ENOSYS for an unhandled IRQ
+ * (non-Xen build only).  Caller retains ownership of @new.
+ */
+int setup_irq(unsigned int irq, struct irqaction * new)
+{
+	int shared = 0;
+	unsigned long flags;
+	struct irqaction *old, **p;
+	irq_desc_t *desc = irq_descp(irq);
+
+#ifndef XEN
+	if (desc->handler == &no_irq_type)
+		return -ENOSYS;
+	/*
+	 * Some drivers like serial.c use request_irq() heavily,
+	 * so we have to be careful not to interfere with a
+	 * running system.
+	 */
+	if (new->flags & SA_SAMPLE_RANDOM) {
+		/*
+		 * This function might sleep, we want to call it first,
+		 * outside of the atomic block.
+		 * Yes, this might clear the entropy pool if the wrong
+		 * driver is attempted to be loaded, without actually
+		 * installing a new handler, but is this really a problem,
+		 * only the sysadmin is able to do this.
+		 */
+		rand_initialize_irq(irq);
+	}
+
+	if (new->flags & SA_PERCPU_IRQ) {
+		desc->status |= IRQ_PER_CPU;
+		desc->handler = &irq_type_ia64_lsapic;
+	}
+#endif
+
+	/*
+	 * The following block of code has to be executed atomically
+	 */
+	spin_lock_irqsave(&desc->lock,flags);
+	p = &desc->action;
+	if ((old = *p) != NULL) {
+#ifdef XEN
+		if (1) {
+			/* Can't share interrupts unless both agree to */
+#else
+		if (!(old->flags & new->flags & SA_SHIRQ)) {
+#endif
+			spin_unlock_irqrestore(&desc->lock,flags);
+			return -EBUSY;
+		}
+
+#ifndef XEN
+		/* add new interrupt at end of irq queue */
+		do {
+			p = &old->next;
+			old = *p;
+		} while (old);
+		shared = 1;
+#endif
+	}
+
+	*p = new;
+
+#ifndef XEN
+	if (!shared) {
+#else
+	{
+#endif
+		/* First (or only) handler: unmask and start the line. */
+		desc->depth = 0;
+#ifdef XEN
+		desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS);
+#else
+		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
+#endif
+		desc->handler->startup(irq);
+	}
+	spin_unlock_irqrestore(&desc->lock,flags);
+
+#ifndef XEN
+	register_irq_proc(irq);
+#endif
+	return 0;
+}
+
+static struct proc_dir_entry * root_irq_dir;
+static struct proc_dir_entry * irq_dir [NR_IRQS];
+
+#ifdef CONFIG_SMP
+
+static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
+
+static cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
+
+static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
+
+/* Record the affinity (single CPU identified by hardware id @hwid) and
+ * redirection hint for @irq in the bookkeeping arrays; out-of-range
+ * IRQ numbers are silently ignored. */
+void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
+{
+	cpumask_t mask = CPU_MASK_NONE;
+
+	cpu_set(cpu_logical_id(hwid), mask);
+
+	if (irq < NR_IRQS) {
+		irq_affinity[irq] = mask;
+		irq_redir[irq] = (char) (redir & 0xff);
+	}
+}
+
+/* /proc read handler: prints "r " when the IRQ is redirected, followed
+ * by the affinity cpumask and a newline.  @data encodes the IRQ number. */
+static int irq_affinity_read_proc (char *page, char **start, off_t off,
+			int count, int *eof, void *data)
+{
+	int len = sprintf(page, "%s", irq_redir[(long)data] ? "r " : "");
+
+	len += cpumask_scnprintf(page+len, count, irq_affinity[(long)data]);
+	if (count - len < 2)
+		return -EINVAL;
+	len += sprintf(page + len, "\n");
+	return len;
+}
+
+/* /proc write handler: parses an optional "r"/"R" redirect prefix plus a
+ * hex cpumask, validates that at least one online CPU is targeted, and
+ * queues the new mask in pending_irq_cpumask[] for move_irq() to apply.
+ * @data encodes the IRQ number; returns the byte count consumed or a
+ * negative errno. */
+static int irq_affinity_write_proc (struct file *file, const char *buffer,
+			unsigned long count, void *data)
+{
+	unsigned int irq = (unsigned long) data;
+	int full_count = count, err;
+	cpumask_t new_value, tmp;
+#	define R_PREFIX_LEN 16
+	char rbuf[R_PREFIX_LEN];
+	int rlen;
+	int prelen;
+	irq_desc_t *desc = irq_descp(irq);
+	unsigned long flags;
+
+	if (!desc->handler->set_affinity)
+		return -EIO;
+
+	/*
+	 * If string being written starts with a prefix of 'r' or 'R'
+	 * and some limited number of spaces, set IA64_IRQ_REDIRECTED.
+	 * If more than (R_PREFIX_LEN - 2) spaces are passed, they won't
+	 * all be trimmed as part of prelen, the untrimmed spaces will
+	 * cause the hex parsing to fail, and this write() syscall will
+	 * fail with EINVAL.
+	 */
+
+	if (!count)
+		return -EINVAL;
+	rlen = min(sizeof(rbuf)-1, count);
+	if (copy_from_user(rbuf, buffer, rlen))
+		return -EFAULT;
+	rbuf[rlen] = 0;
+	prelen = 0;
+	if (tolower(*rbuf) == 'r') {
+		prelen = strspn(rbuf, "Rr ");
+		/* The redirect flag is carried in the IRQ number itself. */
+		irq |= IA64_IRQ_REDIRECTED;
+	}
+
+	err = cpumask_parse(buffer+prelen, count-prelen, new_value);
+	if (err)
+		return err;
+
+	/*
+	 * Do not allow disabling IRQs completely - it's a too easy
+	 * way to make the system unusable accidentally :-) At least
+	 * one online CPU still has to be targeted.
+	 */
+	cpus_and(tmp, new_value, cpu_online_map);
+	if (cpus_empty(tmp))
+		return -EINVAL;
+
+	/* Applied lazily by move_irq(), not here. */
+	spin_lock_irqsave(&desc->lock, flags);
+	pending_irq_cpumask[irq] = new_value;
+	spin_unlock_irqrestore(&desc->lock, flags);
+
+	return full_count;
+}
+
+/* Apply any affinity change queued by irq_affinity_write_proc():
+ * intersect the pending mask with the online CPUs and, if non-empty,
+ * retarget the IRQ.  Caller must hold desc->lock. */
+void move_irq(int irq)
+{
+	/* note - we hold desc->lock */
+	cpumask_t tmp;
+	irq_desc_t *desc = irq_descp(irq);
+
+	if (!cpus_empty(pending_irq_cpumask[irq])) {
+		cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
+		if (unlikely(!cpus_empty(tmp))) {
+			desc->handler->set_affinity(irq, pending_irq_cpumask[irq]);
+		}
+		/* Always consume the request, even if no CPU was online. */
+		cpus_clear(pending_irq_cpumask[irq]);
+	}
+}
+
+
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_HOTPLUG_CPU
+/* Non-zero entry => IRQ was retargeted during hot-unplug and may have a
+ * lost interrupt to replay in fixup_irqs() phase 3. */
+unsigned int vectors_in_migration[NR_IRQS];
+
+/*
+ * Since cpu_online_map is already updated, we just need to check for
+ * affinity that has zeros
+ */
+static void migrate_irqs(void)
+{
+	cpumask_t mask;
+	irq_desc_t *desc;
+	int irq, new_cpu;
+
+	for (irq=0; irq < NR_IRQS; irq++) {
+		desc = irq_descp(irq);
+
+		/*
+		 * No handling for now.
+		 * TBD: Implement a disable function so we can now
+		 * tell CPU not to respond to these local intr sources.
+		 * such as ITV,CPEI,MCA etc.
+		 * NOTE(review): this compares status for equality with
+		 * IRQ_PER_CPU rather than testing the bit -- confirm
+		 * that is intended.
+		 */
+		if (desc->status == IRQ_PER_CPU)
+			continue;
+
+		cpus_and(mask, irq_affinity[irq], cpu_online_map);
+		if (any_online_cpu(mask) == NR_CPUS) {
+			/*
+			 * Save it for phase 2 processing
+			 */
+			vectors_in_migration[irq] = irq;
+
+			/* Retarget to any CPU that is still online. */
+			new_cpu = any_online_cpu(cpu_online_map);
+			mask = cpumask_of_cpu(new_cpu);
+
+			/*
+			 * All three are essential, currently WARN_ON.. maybe panic?
+			 */
+			if (desc->handler && desc->handler->disable &&
+				desc->handler->enable && desc->handler->set_affinity) {
+				desc->handler->disable(irq);
+				desc->handler->set_affinity(irq, mask);
+				desc->handler->enable(irq);
+			} else {
+				WARN_ON((!(desc->handler) || !(desc->handler->disable) ||
+						!(desc->handler->enable) ||
+						!(desc->handler->set_affinity)));
+			}
+		}
+	}
+}
+
+/* Detach a dying CPU from interrupt delivery: retarget its IRQs, drain
+ * interrupts already latched in the local APIC, replay any that were
+ * lost during retargeting, then quiesce the processor. */
+void fixup_irqs(void)
+{
+	unsigned int irq;
+	extern void ia64_process_pending_intr(void);
+
+	/* Mask the local timer (ITV) before migrating anything. */
+	ia64_set_itv(1<<16);
+	/*
+	 * Phase 1: Locate irq's bound to this cpu and
+	 * relocate them for cpu removal.
+	 */
+	migrate_irqs();
+
+	/*
+	 * Phase 2: Perform interrupt processing for all entries reported in
+	 * local APIC.
+	 */
+	ia64_process_pending_intr();
+
+	/*
+	 * Phase 3: Now handle any interrupts not captured in local APIC.
+	 * This is to account for cases that device interrupted during the time the
+	 * rte was being disabled and re-programmed.
+	 */
+	for (irq=0; irq < NR_IRQS; irq++) {
+		if (vectors_in_migration[irq]) {
+			vectors_in_migration[irq]=0;
+			do_IRQ(irq, NULL);
+		}
+	}
+
+	/*
+	 * Now let processor die. We do irq disable and max_xtp() to
+	 * ensure there is no more interrupts routed to this processor.
+	 * But the local timer interrupt can have 1 pending which we
+	 * take care in timer_interrupt().
+	 */
+	max_xtp();
+	local_irq_disable();
+}
+#endif
+
+#ifndef XEN
+/* /proc read handler for the profiling cpumask: prints the mask pointed
+ * to by @data followed by a newline. */
+static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
+			int count, int *eof, void *data)
+{
+	int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
+	if (count - len < 2)
+		return -EINVAL;
+	len += sprintf(page + len, "\n");
+	return len;
+}
+
+/* /proc write handler: parses a hex cpumask from userspace and stores it
+ * into the cpumask pointed to by @data.  Returns bytes consumed or a
+ * negative errno from the parse. */
+static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
+			unsigned long count, void *data)
+{
+	cpumask_t *mask = (cpumask_t *)data;
+	unsigned long full_count = count, err;
+	cpumask_t new_value;
+
+	err = cpumask_parse(buffer, count, new_value);
+	if (err)
+		return err;
+
+	*mask = new_value;
+	return full_count;
+}
+
+#define MAX_NAMELEN 10
+
+/* Create /proc/irq/<irq>/ (and smp_affinity on SMP) for a handled IRQ.
+ * Idempotent: returns early if proc isn't set up, the IRQ has no handler,
+ * or the directory already exists. */
+static void register_irq_proc (unsigned int irq)
+{
+	char name [MAX_NAMELEN];
+
+	if (!root_irq_dir || (irq_descp(irq)->handler == &no_irq_type) || irq_dir[irq])
+		return;
+
+	memset(name, 0, MAX_NAMELEN);
+	sprintf(name, "%d", irq);
+
+	/* create /proc/irq/1234 */
+	irq_dir[irq] = proc_mkdir(name, root_irq_dir);
+
+#ifdef CONFIG_SMP
+	{
+		struct proc_dir_entry *entry;
+
+		/* create /proc/irq/1234/smp_affinity */
+		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
+
+		if (entry) {
+			entry->nlink = 1;
+			entry->data = (void *)(long)irq;
+			entry->read_proc = irq_affinity_read_proc;
+			entry->write_proc = irq_affinity_write_proc;
+		}
+
+		smp_affinity_entry[irq] = entry;
+	}
+#endif
+}
+
+cpumask_t prof_cpu_mask = CPU_MASK_ALL;
+
+/* Boot-time setup of the /proc/irq hierarchy: creates the root directory,
+ * the prof_cpu_mask control file, and a per-IRQ directory for every IRQ
+ * that already has a real handler. */
+void init_irq_proc (void)
+{
+	struct proc_dir_entry *entry;
+	int i;
+
+	/* create /proc/irq */
+	root_irq_dir = proc_mkdir("irq", 0);
+
+	/* create /proc/irq/prof_cpu_mask */
+	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
+
+	if (!entry)
+		return;
+
+	entry->nlink = 1;
+	entry->data = (void *)&prof_cpu_mask;
+	entry->read_proc = prof_cpu_mask_read_proc;
+	entry->write_proc = prof_cpu_mask_write_proc;
+
+	/*
+	 * Create entries for all existing IRQs.
+	 */
+	for (i = 0; i < NR_IRQS; i++) {
+		if (irq_descp(i)->handler == &no_irq_type)
+			continue;
+		register_irq_proc(i);
+	}
+}
+#endif
+
+
+#ifdef XEN
+/*
+ * HANDLING OF GUEST-BOUND PHYSICAL IRQS
+ */
+
+#define IRQ_MAX_GUESTS 7
+/* Overlays desc->action when IRQ_GUEST is set in desc->status: tracks
+ * the domains bound to a physical IRQ instead of a handler function. */
+typedef struct {
+    u8 nr_guests;	/* number of entries used in guest[] */
+    u8 in_flight;	/* guests that have not yet acked this IRQ */
+    u8 shareable;	/* non-zero if additional guests may bind */
+    struct domain *guest[IRQ_MAX_GUESTS];
+} irq_guest_action_t;
+
+/* Deliver a guest-bound physical IRQ: mark it pending in each bound
+ * domain's pirq_mask (counting newly in-flight deliveries) and send the
+ * corresponding event-channel notification. */
+static void __do_IRQ_guest(int irq)
+{
+    irq_desc_t *desc = &irq_desc[irq];
+    irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
+    struct domain *d;
+    int i;
+
+    for ( i = 0; i < action->nr_guests; i++ )
+    {
+        d = action->guest[i];
+        if ( !test_and_set_bit(irq, &d->pirq_mask) )
+            action->in_flight++;
+        send_guest_pirq(d, irq);
+    }
+}
+
+/* Acknowledge guest-bound IRQs on behalf of domain @d: for every bit set
+ * in d->pirq_mask whose event channel is now unmasked, retire the
+ * in-flight count and, when the last guest has acked, end the physical
+ * IRQ at the controller.  Always returns 0. */
+int pirq_guest_unmask(struct domain *d)
+{
+    irq_desc_t *desc;
+    int i, j, pirq;
+    u32 m;
+    shared_info_t *s = d->shared_info;
+
+    for ( i = 0; i < ARRAY_SIZE(d->pirq_mask); i++ )
+    {
+        m = d->pirq_mask[i];
+        /* Walk the set bits of this 32-bit word via ffs(). */
+        while ( (j = ffs(m)) != 0 )
+        {
+            m &= ~(1 << --j);
+            pirq = (i << 5) + j;
+            desc = &irq_desc[pirq];
+            spin_lock_irq(&desc->lock);
+            if ( !test_bit(d->pirq_to_evtchn[pirq], &s->evtchn_mask[0]) &&
+                 test_and_clear_bit(pirq, &d->pirq_mask) &&
+                 (--((irq_guest_action_t *)desc->action)->in_flight == 0) )
+                desc->handler->end(pirq);
+            spin_unlock_irq(&desc->lock);
+        }
+    }
+
+    return 0;
+}
+
+/* Bind physical IRQ @irq to guest @d.  The first binder allocates the
+ * irq_guest_action_t, starts the line and sets its affinity; later
+ * binders may join only if everyone agreed to share.  Returns 0, or
+ * -EPERM / -EBUSY / -ENOMEM on failure. */
+int pirq_guest_bind(struct exec_domain *d, int irq, int will_share)
+{
+    irq_desc_t *desc = &irq_desc[irq];
+    irq_guest_action_t *action;
+    unsigned long flags;
+    int rc = 0;
+
+    if ( !IS_CAPABLE_PHYSDEV(d->domain) )
+        return -EPERM;
+
+    spin_lock_irqsave(&desc->lock, flags);
+
+    action = (irq_guest_action_t *)desc->action;
+
+    if ( !(desc->status & IRQ_GUEST) )
+    {
+        /* IRQ already owned by a regular in-hypervisor handler? */
+        if ( desc->action != NULL )
+        {
+            DPRINTK("Cannot bind IRQ %d to guest. In use by '%s'.\n",
+                    irq, desc->action->name);
+            rc = -EBUSY;
+            goto out;
+        }
+
+        action = xmalloc(sizeof(irq_guest_action_t));
+        if ( (desc->action = (struct irqaction *)action) == NULL )
+        {
+            DPRINTK("Cannot bind IRQ %d to guest. Out of memory.\n", irq);
+            rc = -ENOMEM;
+            goto out;
+        }
+
+        action->nr_guests = 0;
+        action->in_flight = 0;
+        action->shareable = will_share;
+
+        desc->depth = 0;
+        desc->status |= IRQ_GUEST;
+        desc->status &= ~IRQ_DISABLED;
+        desc->handler->startup(irq);
+
+        /* Attempt to bind the interrupt target to the correct CPU. */
+        if ( desc->handler->set_affinity != NULL )
+            desc->handler->set_affinity(
+                irq, apicid_to_phys_cpu_present(d->processor));
+    }
+    else if ( !will_share || !action->shareable )
+    {
+        DPRINTK("Cannot bind IRQ %d to guest. Will not share with others.\n",
+                irq);
+        rc = -EBUSY;
+        goto out;
+    }
+
+    if ( action->nr_guests == IRQ_MAX_GUESTS )
+    {
+        DPRINTK("Cannot bind IRQ %d to guest. Already at max share.\n", irq);
+        rc = -EBUSY;
+        goto out;
+    }
+
+    action->guest[action->nr_guests++] = d;
+
+ out:
+    spin_unlock_irqrestore(&desc->lock, flags);
+    return rc;
+}
+
+/* Unbind domain @d from guest-bound IRQ @irq.  Retires any delivery
+ * still in flight to @d; the last guest to leave tears down the
+ * irq_guest_action_t and shuts the line down, otherwise @d is simply
+ * removed from the guest list.  Always returns 0. */
+int pirq_guest_unbind(struct domain *d, int irq)
+{
+    irq_desc_t *desc = &irq_desc[irq];
+    irq_guest_action_t *action;
+    unsigned long flags;
+    int i;
+
+    spin_lock_irqsave(&desc->lock, flags);
+
+    action = (irq_guest_action_t *)desc->action;
+
+    /* If @d never acked the current delivery, retire its slot now. */
+    if ( test_and_clear_bit(irq, &d->pirq_mask) &&
+         (--action->in_flight == 0) )
+        desc->handler->end(irq);
+
+    if ( action->nr_guests == 1 )
+    {
+        /* Last guest: free the action and mask the physical line. */
+        desc->action = NULL;
+        xfree(action);
+        desc->depth = 1;
+        desc->status |= IRQ_DISABLED;
+        desc->status &= ~IRQ_GUEST;
+        desc->handler->shutdown(irq);
+    }
+    else
+    {
+        i = 0;
+        while ( action->guest[i] != d )
+            i++;
+        /* Close the gap in the guest[] pointer array.  memmove takes a
+         * byte count: the old code passed only IRQ_MAX_GUESTS-i-1 bytes
+         * for an array of pointers, leaving stale entries behind. */
+        memmove(&action->guest[i], &action->guest[i+1],
+                (action->nr_guests-i-1) * sizeof(action->guest[0]));
+        action->nr_guests--;
+    }
+
+    spin_unlock_irqrestore(&desc->lock, flags);
+    return 0;
+}
+
+/* Report whether @irq could currently be bound by a guest: either it is
+ * completely unbound, or it is already guest-bound, shareable, the
+ * caller agrees to share, and the share limit is not reached. */
+int pirq_guest_bindable(int irq, int will_share)
+{
+    irq_desc_t *desc = &irq_desc[irq];
+    irq_guest_action_t *action;
+    unsigned long flags;
+    int okay;
+
+    spin_lock_irqsave(&desc->lock, flags);
+
+    action = (irq_guest_action_t *)desc->action;
+
+    /*
+     * To be bindable the IRQ must either be not currently bound (1), or
+     * it must be guest-bound (2a) and shareable (2b) and not at its
+     * share limit (3).  The IRQ_GUEST check in the second arm is
+     * required: when a regular irqaction is installed, desc->action is
+     * not an irq_guest_action_t, so dereferencing action->shareable
+     * would misread foreign memory (the old code did exactly that).
+     */
+    okay = ((!(desc->status & IRQ_GUEST) && (action == NULL)) ||   /* 1  */
+            ((desc->status & IRQ_GUEST) && (action != NULL) &&     /* 2a */
+             action->shareable && will_share &&                    /* 2b */
+             (action->nr_guests != IRQ_MAX_GUESTS)));              /* 3  */
+
+    spin_unlock_irqrestore(&desc->lock, flags);
+    return okay;
+}
+#endif
+
+#ifdef XEN
+#ifdef IA64
+// this is a temporary hack until real console input is implemented
+/* Forward a (simulated) keyboard interrupt to the guest.  Declared
+ * irqreturn_t, so it must return a value: the original had no return
+ * statement, which is undefined behavior if the caller uses the result. */
+irqreturn_t guest_forward_keyboard_input(int irq, void *nada, struct pt_regs *regs)
+{
+	domain_pend_keyboard_interrupt(irq);
+	return IRQ_HANDLED;
+}
+
+/* Hook the simulator's serial/keyboard IRQ (hard-coded vector 0x30) to
+ * guest_forward_keyboard_input().  On failure, prints a message and
+ * spins forever -- temporary bring-up code. */
+void serial_input_init(void)
+{
+	int retval;
+	int irq = 0x30;	// FIXME
+
+	retval = request_irq(irq,guest_forward_keyboard_input,SA_INTERRUPT,"siminput",NULL);
+	if (retval) {
+		printk("serial_input_init: broken request_irq call\n");
+		while(1);	/* deliberate hang: no console without this IRQ */
+	}
+}
+#endif
+#endif
--- /dev/null
+#
+# Makefile for ia64-specific library routines..
+#
+
+include $(BASEDIR)/Rules.mk
+
+OBJS := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
+	__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o \
+	bitop.o checksum.o clear_page.o csum_partial_copy.o copy_page.o \
+	clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \
+	flush.o ip_fast_csum.o do_csum.o copy_user.o \
+	memset.o strlen.o memcpy.o swiotlb.o
+
+# Partially link everything into a single relocatable object.
+default: $(OBJS)
+	$(LD) -r -o ia64lib.o $(OBJS)
+
+AFLAGS += -I$(BASEDIR)/include -D__ASSEMBLY__
+
+# The eight integer division/modulo helpers are all built from two
+# assembly templates (idiv64.S / idiv32.S), with -DUNSIGNED and/or
+# -DMODULO selecting the variant.
+# NOTE(review): "-c" appears twice on several rules below; harmless
+# but redundant.
+__divdi3.o: idiv64.S
+	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -o $@ $<
+
+__udivdi3.o: idiv64.S
+	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -DUNSIGNED -c -o $@ $<
+
+__moddi3.o: idiv64.S
+	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -DMODULO -c -o $@ $<
+
+__umoddi3.o: idiv64.S
+	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -DMODULO -DUNSIGNED -c -o $@ $<
+
+__divsi3.o: idiv32.S
+	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -o $@ $<
+
+__udivsi3.o: idiv32.S
+	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -DUNSIGNED -c -o $@ $<
+
+__modsi3.o: idiv32.S
+	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -DMODULO -c -o $@ $<
+
+__umodsi3.o: idiv32.S
+	$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -DMODULO -DUNSIGNED -c -o $@ $<
+
+clean:
+	rm -f *.o *~
--- /dev/null
+/*
+ * Initialize MMU support.
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#ifdef XEN
+#include <xen/sched.h>
+#endif
+#include <linux/bootmem.h>
+#include <linux/efi.h>
+#include <linux/elf.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/module.h>
+#ifndef XEN
+#include <linux/personality.h>
+#endif
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#ifndef XEN
+#include <linux/proc_fs.h>
+#endif
+
+#ifndef XEN
+#include <asm/a.out.h>
+#endif
+#include <asm/bitops.h>
+#include <asm/dma.h>
+#ifndef XEN
+#include <asm/ia32.h>
+#endif
+#include <asm/io.h>
+#include <asm/machvec.h>
+#include <asm/numa.h>
+#include <asm/patch.h>
+#include <asm/pgalloc.h>
+#include <asm/sal.h>
+#include <asm/sections.h>
+#include <asm/system.h>
+#include <asm/tlb.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+#include <asm/mca.h>
+
+#ifndef XEN
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+#endif
+
+extern void ia64_tlb_init (void);
+
+unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+unsigned long vmalloc_end = VMALLOC_END_INIT;
+EXPORT_SYMBOL(vmalloc_end);
+struct page *vmem_map;
+EXPORT_SYMBOL(vmem_map);
+#endif
+
+static int pgt_cache_water[2] = { 25, 50 };
+
+struct page *zero_page_memmap_ptr; /* map entry for zero page */
+EXPORT_SYMBOL(zero_page_memmap_ptr);
+
+#ifdef XEN
+void *high_memory;
+EXPORT_SYMBOL(high_memory);
+
+/////////////////////////////////////////////
+// following from linux-2.6.7/mm/mmap.c
+/* description of effects of mapping type and prot in current implementation.
+ * this is due to the limited x86 page protection hardware. The expected
+ * behavior is in parens:
+ *
+ * map_type prot
+ * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC
+ * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes
+ * w: (no) no w: (no) no w: (yes) yes w: (no) no
+ * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
+ *
+ * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
+ * w: (no) no w: (no) no w: (copy) copy w: (no) no
+ * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
+ *
+ */
+/* Page protections for the 16 combinations of MAP_SHARED/MAP_PRIVATE x
+ * PROT_{NONE,READ,WRITE,EXEC}; indexed by vm_flags & 0x7 (plus the
+ * shared bit).  Copied from linux-2.6.7 mm/mmap.c -- see table above. */
+pgprot_t protection_map[16] = {
+	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
+	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
+};
+
+/* Stub: Xen does not manage guest VMAs yet; only logs the call. */
+void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+{
+	printf("insert_vm_struct: called, not implemented yet\n");
+}
+
+/////////////////////////////////////////////
+//following from linux/mm/memory.c
+
+/*
+ * Allocate page middle directory.
+ *
+ * We've already handled the fast-path in-line, and we own the
+ * page table lock.
+ *
+ * On a two-level page table, this ends up actually being entirely
+ * optimized away.
+ */
+pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+	pmd_t *new;
+
+	/* Drop the lock across the (possibly sleeping) allocation. */
+	spin_unlock(&mm->page_table_lock);
+	new = pmd_alloc_one(mm, address);
+	spin_lock(&mm->page_table_lock);
+	if (!new)
+		return NULL;
+
+	/*
+	 * Because we dropped the lock, we should re-check the
+	 * entry, as somebody else could have populated it..
+	 */
+	if (pgd_present(*pgd)) {
+		pmd_free(new);
+		goto out;
+	}
+	pgd_populate(mm, pgd, new);
+out:
+	return pmd_offset(pgd, address);
+}
+
+/* Ensure the PTE page for @address exists under @pmd, allocating it if
+ * needed (same drop-lock/recheck dance as __pmd_alloc), then return the
+ * mapped PTE.  Copied from linux-2.6.7 mm/memory.c. */
+pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+{
+	if (!pmd_present(*pmd)) {
+		struct page *new;
+
+		spin_unlock(&mm->page_table_lock);
+		new = pte_alloc_one(mm, address);
+		spin_lock(&mm->page_table_lock);
+		if (!new)
+			return NULL;
+
+		/*
+		 * Because we dropped the lock, we should re-check the
+		 * entry, as somebody else could have populated it..
+		 */
+		if (pmd_present(*pmd)) {
+			pte_free(new);
+			goto out;
+		}
+		inc_page_state(nr_page_table_pages);
+		pmd_populate(mm, pmd, new);
+	}
+out:
+	return pte_offset_map(pmd, address);
+}
+/////////////////////////////////////////////
+#endif /* XEN */
+/////////////////////////////////////////////
+#endif /* XEN */
+
+/* Trim the pgd/pmd quicklist cache back below the low-water mark
+ * whenever it has grown past the high-water mark. */
+void
+check_pgt_cache (void)
+{
+	int low, high;
+
+	low = pgt_cache_water[0];
+	high = pgt_cache_water[1];
+
+	if (pgtable_cache_size > (u64) high) {
+		do {
+			if (pgd_quicklist)
+				free_page((unsigned long)pgd_alloc_one_fast(0));
+			if (pmd_quicklist)
+				free_page((unsigned long)pmd_alloc_one_fast(0, 0));
+		} while (pgtable_cache_size > (u64) low);
+	}
+}
+
+/* Keep the i-cache coherent with the d-cache for executable pages:
+ * flush once per page, using PG_arch_1 as the "already clean" marker. */
+void
+update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
+{
+	unsigned long addr;
+	struct page *page;
+
+	if (!pte_exec(pte))
+		return;				/* not an executable page... */
+
+	page = pte_page(pte);
+	/* don't use VADDR: it may not be mapped on this CPU (or may have just been flushed): */
+	addr = (unsigned long) page_address(page);
+
+	if (test_bit(PG_arch_1, &page->flags))
+		return;				/* i-cache is already coherent with d-cache */
+
+	flush_icache_range(addr, addr + PAGE_SIZE);
+	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
+}
+
+/* Compute the bottom of the register backing store.  Xen uses a fixed
+ * maximum; Linux clamps the process stack rlimit to the same cap. */
+inline void
+ia64_set_rbs_bot (void)
+{
+#ifdef XEN
+	unsigned stack_size = MAX_USER_STACK_SIZE;
+#else
+	unsigned long stack_size = current->rlim[RLIMIT_STACK].rlim_max & -16;
+#endif
+
+	if (stack_size > MAX_USER_STACK_SIZE)
+		stack_size = MAX_USER_STACK_SIZE;
+	current->thread.rbs_bot = STACK_TOP - stack_size;
+}
+
+/*
+ * This performs some platform-dependent address space initialization.
+ * On IA-64, we want to setup the VM area for the register backing
+ * store (which grows upwards) and install the gateway page which is
+ * used for signal trampolines, etc.
+ *
+ * Under Xen this is not implemented yet and only logs a message.
+ */
+void
+ia64_init_addr_space (void)
+{
+#ifdef XEN
+printf("ia64_init_addr_space: called, not implemented\n");
+#else
+	struct vm_area_struct *vma;
+
+	ia64_set_rbs_bot();
+
+	/*
+	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
+	 * the problem. When the process attempts to write to the register backing store
+	 * for the first time, it will get a SEGFAULT in this case.
+	 */
+	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	if (vma) {
+		memset(vma, 0, sizeof(*vma));
+		vma->vm_mm = current->mm;
+		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
+		vma->vm_end = vma->vm_start + PAGE_SIZE;
+		vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
+		vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_GROWSUP;
+		insert_vm_struct(current->mm, vma);
+	}
+
+	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
+	if (!(current->personality & MMAP_PAGE_ZERO)) {
+		vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+		if (vma) {
+			memset(vma, 0, sizeof(*vma));
+			vma->vm_mm = current->mm;
+			vma->vm_end = PAGE_SIZE;
+			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
+			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
+			insert_vm_struct(current->mm, vma);
+		}
+	}
+#endif
+}
+
+/* Release the __init text/data region back to the page allocator once
+ * boot is complete, accounting the pages into totalram_pages. */
+void
+free_initmem (void)
+{
+	unsigned long addr, eaddr;
+
+	addr = (unsigned long) ia64_imva(__init_begin);
+	eaddr = (unsigned long) ia64_imva(__init_end);
+	while (addr < eaddr) {
+		ClearPageReserved(virt_to_page(addr));
+		set_page_count(virt_to_page(addr), 1);
+		free_page(addr);
+		++totalram_pages;
+		addr += PAGE_SIZE;
+	}
+	printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
+	       (__init_end - __init_begin) >> 10);
+}
+
+/* Free the initrd pages in [start, end), shrinking the range to whole
+ * kernel pages so a page shared with the kernel image is never freed. */
+void
+free_initrd_mem (unsigned long start, unsigned long end)
+{
+	struct page *page;
+	/*
+	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
+	 * Thus EFI and the kernel may have different page sizes. It is
+	 * therefore possible to have the initrd share the same page as
+	 * the end of the kernel (given current setup).
+	 *
+	 * To avoid freeing/using the wrong page (kernel sized) we:
+	 *	- align up the beginning of initrd
+	 *	- align down the end of initrd
+	 *
+	 *  |             |
+	 *  |=============| a000
+	 *  |             |
+	 *  |             |
+	 *  |             | 9000
+	 *  |/////////////|
+	 *  |/////////////|
+	 *  |=============| 8000
+	 *  |///INITRD////|
+	 *  |/////////////|
+	 *  |/////////////| 7000
+	 *  |             |
+	 *  |KKKKKKKKKKKKK|
+	 *  |=============| 6000
+	 *  |KKKKKKKKKKKKK|
+	 *  |KKKKKKKKKKKKK|
+	 *  K=kernel using 8KB pages
+	 *
+	 * In this example, we must free page 8000 ONLY. So we must align up
+	 * initrd_start and keep initrd_end as is.
+	 */
+	start = PAGE_ALIGN(start);
+	end = end & PAGE_MASK;
+
+	if (start < end)
+		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);
+
+	for (; start < end; start += PAGE_SIZE) {
+		if (!virt_addr_valid(start))
+			continue;
+		page = virt_to_page(start);
+		ClearPageReserved(page);
+		set_page_count(page, 1);
+		free_page(start);
+		++totalram_pages;
+	}
+}
+
+/*
+ * This installs a clean page in the kernel's page table.
+ * Allocates intermediate pmd/pte levels under init_mm's lock as needed;
+ * an already-present PTE is left untouched.  Returns @page.
+ */
+struct page *
+put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
+{
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	if (!PageReserved(page))
+		printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
+		       page_address(page));
+
+	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */
+
+	spin_lock(&init_mm.page_table_lock);
+	{
+		pmd = pmd_alloc(&init_mm, pgd, address);
+		if (!pmd)
+			goto out;
+		pte = pte_alloc_map(&init_mm, pmd, address);
+		if (!pte)
+			goto out;
+		if (!pte_none(*pte)) {
+			pte_unmap(pte);
+			goto out;
+		}
+		set_pte(pte, mk_pte(page, pgprot));
+		pte_unmap(pte);
+	}
+  out:	spin_unlock(&init_mm.page_table_lock);
+	/* no need for flush_tlb */
+	return page;
+}
+
+/* Install the gate page used for signal trampolines / "epc" privilege
+ * promotion.  Compiled out for Xen, which has no user gate page. */
+static void
+setup_gate (void)
+{
+#ifndef XEN
+	struct page *page;
+
+	/*
+	 * Map the gate page twice: once read-only to export the ELF headers etc. and once
+	 * execute-only page to enable privilege-promotion via "epc":
+	 */
+	page = virt_to_page(ia64_imva(__start_gate_section));
+	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
+#ifdef HAVE_BUGGY_SEGREL
+	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
+	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
+#else
+	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
+#endif
+	ia64_patch_gate();
+#endif
+}
+
+/* Per-CPU MMU bring-up: pins the per-CPU data TLB entry, validates the
+ * implemented virtual address width, then either initializes Xen's VHPT
+ * and dom0 allocation (XEN) or programs the Linux VMLPT via cr.pta. */
+void __devinit
+ia64_mmu_init (void *my_cpu_data)
+{
+	unsigned long psr, pta, impl_va_bits;
+	extern void __devinit tlb_init (void);
+	int cpu;
+
+#ifdef CONFIG_DISABLE_VHPT
+#	define VHPT_ENABLE_BIT	0
+#else
+#	define VHPT_ENABLE_BIT	1
+#endif
+
+	/* Pin mapping for percpu area into TLB */
+	psr = ia64_clear_ic();
+	ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
+		 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
+		 PERCPU_PAGE_SHIFT);
+
+	ia64_set_psr(psr);
+	ia64_srlz_i();
+
+	/*
+	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
+	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
+	 * virtual address space are implemented but if we pick a large enough page size
+	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
+	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
+	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
+	 * problem in practice.  Alternatively, we could truncate the top of the mapped
+	 * address space to not permit mappings that would overlap with the VMLPT.
+	 * --davidm 00/12/06
+	 */
+#	define pte_bits			3
+#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
+	/*
+	 * The virtual page table has to cover the entire implemented address space within
+	 * a region even though not all of this space may be mappable.  The reason for
+	 * this is that the Access bit and Dirty bit fault handlers perform
+	 * non-speculative accesses to the virtual page table, so the address range of the
+	 * virtual page table itself needs to be covered by virtual page table.
+	 */
+#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
+#	define POW2(n)			(1ULL << (n))
+
+	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));
+
+	if (impl_va_bits < 51 || impl_va_bits > 61)
+		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
+
+#ifdef XEN
+	/* Xen uses its own VHPT instead of the Linux VMLPT setup below. */
+	vhpt_init();
+	alloc_dom0();
+#else
+	/* place the VMLPT at the end of each page-table mapped region: */
+	pta = POW2(61) - POW2(vmlpt_bits);
+
+	if (POW2(mapped_space_bits) >= pta)
+		panic("mm/init: overlap between virtually mapped linear page table and "
+		      "mapped kernel space!");
+	/*
+	 * Set the (virtually mapped linear) page table address.  Bit
+	 * 8 selects between the short and long format, bits 2-7 the
+	 * size of the table, and bit 0 whether the VHPT walker is
+	 * enabled.
+	 */
+	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);
+#endif
+
+	ia64_tlb_init();
+
+#ifdef	CONFIG_HUGETLB_PAGE
+	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
+	ia64_srlz_d();
+#endif
+
+	cpu = smp_processor_id();
+
+#ifndef XEN
+	/* mca handler uses cr.lid as key to pick the right entry */
+	ia64_mca_tlb_list[cpu].cr_lid = ia64_getreg(_IA64_REG_CR_LID);
+
+	/* insert this percpu data information into our list for MCA recovery purposes */
+	ia64_mca_tlb_list[cpu].percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));
+	/* Also save per-cpu tlb flush recipe for use in physical mode mca handler */
+	ia64_mca_tlb_list[cpu].ptce_base = local_cpu_data->ptce_base;
+	ia64_mca_tlb_list[cpu].ptce_count[0] = local_cpu_data->ptce_count[0];
+	ia64_mca_tlb_list[cpu].ptce_count[1] = local_cpu_data->ptce_count[1];
+	ia64_mca_tlb_list[cpu].ptce_stride[0] = local_cpu_data->ptce_stride[0];
+	ia64_mca_tlb_list[cpu].ptce_stride[1] = local_cpu_data->ptce_stride[1];
+#endif
+}
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+
+/* efi_memmap_walk callback: build kernel page tables covering the slice
+ * of the virtual mem_map that describes physical range [start, end),
+ * allocating pgd/pmd/pte pages from the node-local bootmem allocator. */
+int
+create_mem_map_page_table (u64 start, u64 end, void *arg)
+{
+	unsigned long address, start_page, end_page;
+	struct page *map_start, *map_end;
+	int node;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
+	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);
+
+	start_page = (unsigned long) map_start & PAGE_MASK;
+	end_page = PAGE_ALIGN((unsigned long) map_end);
+	node = paddr_to_nid(__pa(start));
+
+	for (address = start_page; address < end_page; address += PAGE_SIZE) {
+		pgd = pgd_offset_k(address);
+		if (pgd_none(*pgd))
+			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
+		pmd = pmd_offset(pgd, address);
+
+		if (pmd_none(*pmd))
+			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
+		pte = pte_offset_kernel(pmd, address);
+
+		if (pte_none(*pte))
+			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
+					     PAGE_KERNEL));
+	}
+	return 0;
+}
+
+/* Arguments threaded through efi_memmap_walk() to virtual_memmap_init(). */
+struct memmap_init_callback_data {
+	struct page *start;
+	struct page *end;
+	int nid;
+	unsigned long zone;
+};
+
+/* efi_memmap_walk callback: initialize the struct page entries of the
+ * virtual mem_map that correspond to physical range [start, end),
+ * clipped to the zone range passed in @arg. */
+static int
+virtual_memmap_init (u64 start, u64 end, void *arg)
+{
+	struct memmap_init_callback_data *args;
+	struct page *map_start, *map_end;
+
+	args = (struct memmap_init_callback_data *) arg;
+
+	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
+	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);
+
+	if (map_start < args->start)
+		map_start = args->start;
+	if (map_end > args->end)
+		map_end = args->end;
+
+	/*
+	 * We have to initialize "out of bounds" struct page elements that fit completely
+	 * on the same pages that were allocated for the "in bounds" elements because they
+	 * may be referenced later (and found to be "reserved").
+	 */
+	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
+	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
+		    / sizeof(struct page));
+
+	if (map_start < map_end)
+		memmap_init_zone(map_start, (unsigned long) (map_end - map_start),
+				 args->nid, args->zone, page_to_pfn(map_start));
+	return 0;
+}
+
+/* Zone memmap initialization: direct when mem_map is contiguous,
+ * otherwise walks the EFI memory map so only present ranges of the
+ * virtual mem_map are touched. */
+void
+memmap_init (struct page *start, unsigned long size, int nid,
+	     unsigned long zone, unsigned long start_pfn)
+{
+	if (!vmem_map)
+		memmap_init_zone(start, size, nid, zone, start_pfn);
+	else {
+		struct memmap_init_callback_data args;
+
+		args.start = start;
+		args.end = start + size;
+		args.nid = nid;
+		args.zone = zone;
+
+		efi_memmap_walk(virtual_memmap_init, &args);
+	}
+}
+
+int
+ia64_pfn_valid (unsigned long pfn)
+{
+ char byte;
+ struct page *pg = pfn_to_page(pfn);
+
+ return (__get_user(byte, (char *) pg) == 0)
+ && ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
+ || (__get_user(byte, (char *) (pg + 1) - 1) == 0));
+}
+EXPORT_SYMBOL(ia64_pfn_valid);
+
+int
+find_largest_hole (u64 start, u64 end, void *arg)
+{
+ u64 *max_gap = arg;
+
+ static u64 last_end = PAGE_OFFSET;
+
+ /* NOTE: this algorithm assumes efi memmap table is ordered */
+
+#ifdef XEN
+//printf("find_largest_hole: start=%lx,end=%lx,max_gap=%lx\n",start,end,*(unsigned long *)arg);
+#endif
+ if (*max_gap < (start - last_end))
+ *max_gap = start - last_end;
+ last_end = end;
+#ifdef XEN
+//printf("find_largest_hole2: max_gap=%lx,last_end=%lx\n",*max_gap,last_end);
+#endif
+ return 0;
+}
+#endif /* CONFIG_VIRTUAL_MEM_MAP */
+
+static int
+count_reserved_pages (u64 start, u64 end, void *arg)
+{
+ unsigned long num_reserved = 0;
+ unsigned long *count = arg;
+
+ for (; start < end; start += PAGE_SIZE)
+ if (PageReserved(virt_to_page(start)))
+ ++num_reserved;
+ *count += num_reserved;
+ return 0;
+}
+
+/*
+ * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
+ * system call handler. When this option is in effect, all fsyscalls will end up bubbling
+ * down into the kernel and calling the normal (heavy-weight) syscall handler. This is
+ * useful for performance testing, but conceivably could also come in handy for debugging
+ * purposes.
+ */
+
+static int nolwsys;
+
+static int __init
+nolwsys_setup (char *s)
+{
+ nolwsys = 1;
+ return 1;
+}
+
+__setup("nolwsys", nolwsys_setup);
+
+void
+mem_init (void)
+{
+ long reserved_pages, codesize, datasize, initsize;
+ unsigned long num_pgt_pages;
+ pg_data_t *pgdat;
+ int i;
+#ifndef XEN
+ static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;
+#endif
+
+#ifdef CONFIG_PCI
+ /*
+ * This needs to be called _after_ the command line has been parsed but _before_
+ * any drivers that may need the PCI DMA interface are initialized or bootmem has
+ * been freed.
+ */
+ platform_dma_init();
+#endif
+
+#ifndef CONFIG_DISCONTIGMEM
+ if (!mem_map)
+ BUG();
+ max_mapnr = max_low_pfn;
+#endif
+
+ high_memory = __va(max_low_pfn * PAGE_SIZE);
+
+#ifndef XEN
+ kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
+ kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
+ kclist_add(&kcore_kernel, _stext, _end - _stext);
+#endif
+
+ for_each_pgdat(pgdat)
+ totalram_pages += free_all_bootmem_node(pgdat);
+
+ reserved_pages = 0;
+ efi_memmap_walk(count_reserved_pages, &reserved_pages);
+
+ codesize = (unsigned long) _etext - (unsigned long) _stext;
+ datasize = (unsigned long) _edata - (unsigned long) _etext;
+ initsize = (unsigned long) __init_end - (unsigned long) __init_begin;
+
+ printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
+ "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
+ num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
+ reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);
+
+ /*
+ * Allow for enough (cached) page table pages so that we can map the entire memory
+ * at least once. Each task also needs a couple of page table pages, so add in a
+ * fudge factor for that (don't use "threads-max" here; that would be wrong!).
+ * Don't allow the cache to be more than 10% of total memory, though.
+ */
+# define NUM_TASKS 500 /* typical number of tasks */
+ num_pgt_pages = nr_free_pages() / PTRS_PER_PGD + NUM_TASKS;
+ if (num_pgt_pages > nr_free_pages() / 10)
+ num_pgt_pages = nr_free_pages() / 10;
+ if (num_pgt_pages > (u64) pgt_cache_water[1])
+ pgt_cache_water[1] = num_pgt_pages;
+
+#ifndef XEN
+ /*
+ * For fsyscall entry points with no light-weight handler, use the ordinary
+ * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
+ * code can tell them apart.
+ */
+ for (i = 0; i < NR_syscalls; ++i) {
+ extern unsigned long fsyscall_table[NR_syscalls];
+ extern unsigned long sys_call_table[NR_syscalls];
+
+ if (!fsyscall_table[i] || nolwsys)
+ fsyscall_table[i] = sys_call_table[i] | 1;
+ }
+#endif
+ setup_gate(); /* setup gate pages before we free up boot memory... */
+
+#ifdef CONFIG_IA32_SUPPORT
+ ia32_boot_gdt_init();
+#endif
+}
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/bootmem.h 2004-06-15 23:19:52.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/linux/bootmem.h 2004-08-25 19:28:13.000000000 -0600
+@@ -41,7 +41,9 @@
+ extern void __init free_bootmem (unsigned long addr, unsigned long size);
+ extern void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal);
+ #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
++#ifndef XEN
+ extern void __init reserve_bootmem (unsigned long addr, unsigned long size);
++#endif
+ #define alloc_bootmem(x) \
+ __alloc_bootmem((x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+ #define alloc_bootmem_low(x) \
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/current.h 2004-06-15 23:19:52.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/current.h 2004-08-25 19:28:12.000000000 -0600
+@@ -12,6 +12,14 @@
+ * In kernel mode, thread pointer (r13) is used to point to the current task
+ * structure.
+ */
++#ifdef XEN
++struct domain;
++#define get_current() ((struct exec_domain *) ia64_getreg(_IA64_REG_TP))
++#define current get_current()
++//#define set_current(d) ia64_setreg(_IA64_REG_TP,(void *)d);
++#define set_current(d) (ia64_r13 = (void *)d)
++#else
+ #define current ((struct task_struct *) ia64_getreg(_IA64_REG_TP))
++#endif
+
+ #endif /* _ASM_IA64_CURRENT_H */
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/efi.c 2004-06-15 23:18:55.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/efi.c 2004-12-17 13:47:03.000000000 -0700
+@@ -25,6 +25,9 @@
+ #include <linux/types.h>
+ #include <linux/time.h>
+ #include <linux/efi.h>
++#ifdef XEN
++#include <xen/sched.h>
++#endif
+
+ #include <asm/io.h>
+ #include <asm/kregs.h>
+@@ -49,7 +52,10 @@
+ { \
+ struct ia64_fpreg fr[6]; \
+ efi_status_t ret; \
++ efi_time_cap_t *atc = NULL; \
+ \
++ if (tc) \
++ atc = adjust_arg(tc); \
+ ia64_save_scratch_fpregs(fr); \
+ ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm), \
+ adjust_arg(tc)); \
+@@ -201,6 +207,7 @@
+ if ((*efi.get_time)(&tm, 0) != EFI_SUCCESS)
+ return;
+
++ dummy();
+ ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
+ ts->tv_nsec = tm.nanosecond;
+ }
+@@ -303,6 +310,10 @@
+ if (!(md->attribute & EFI_MEMORY_WB))
+ continue;
+
++#ifdef XEN
++// this is a temporary hack to avoid CONFIG_VIRTUAL_MEM_MAP
++ if (md->phys_addr >= 0x100000000) continue;
++#endif
+ /*
+ * granule_addr is the base of md's first granule.
+ * [granule_addr - first_non_wb_addr) is guaranteed to
+@@ -456,9 +467,11 @@
+
+ cpu = smp_processor_id();
+
++#ifndef XEN
+ /* insert this TR into our list for MCA recovery purposes */
+ ia64_mca_tlb_list[cpu].pal_base = vaddr & mask;
+ ia64_mca_tlb_list[cpu].pal_paddr = pte_val(mk_pte_phys(md->phys_addr, PAGE_KERNEL));
++#endif
+ }
+ }
+
+@@ -680,6 +693,30 @@
+ return 0;
+ }
+
++#ifdef XEN
++// variation of efi_get_iobase which returns entire memory descriptor
++efi_memory_desc_t *
++efi_get_io_md (void)
++{
++ void *efi_map_start, *efi_map_end, *p;
++ efi_memory_desc_t *md;
++ u64 efi_desc_size;
++
++ efi_map_start = __va(ia64_boot_param->efi_memmap);
++ efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
++ efi_desc_size = ia64_boot_param->efi_memdesc_size;
++
++ for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
++ md = p;
++ if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
++ if (md->attribute & EFI_MEMORY_UC)
++ return md;
++ }
++ }
++ return 0;
++}
++#endif
++
+ u32
+ efi_mem_type (unsigned long phys_addr)
+ {
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/efi.h 2004-06-15 23:20:03.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/linux/efi.h 2004-08-25 19:28:13.000000000 -0600
+@@ -15,8 +15,10 @@
+ #include <linux/string.h>
+ #include <linux/time.h>
+ #include <linux/types.h>
++#ifndef XEN
+ #include <linux/proc_fs.h>
+ #include <linux/rtc.h>
++#endif
+ #include <linux/ioport.h>
+
+ #include <asm/page.h>
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/entry.S 2005-01-23 13:23:36.000000000 -0700
++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/entry.S 2004-12-17 13:47:03.000000000 -0700
+@@ -35,7 +35,9 @@
+
+ #include <asm/asmmacro.h>
+ #include <asm/cache.h>
++#ifndef XEN
+ #include <asm/errno.h>
++#endif
+ #include <asm/kregs.h>
+ #include <asm/offsets.h>
+ #include <asm/pgtable.h>
+@@ -46,6 +48,23 @@
+
+ #include "minstate.h"
+
++#ifdef XEN
++#define sys_execve 0
++#define do_fork 0
++#define syscall_trace 0
++#define schedule 0
++#define do_notify_resume_user 0
++#define ia64_rt_sigsuspend 0
++#define ia64_rt_sigreturn 0
++#define ia64_handle_unaligned 0
++#define errno 0
++#define sys_ni_syscall 0
++#define unw_init_frame_info 0
++#define sys_call_table 0
++#endif
++
++
++
+ /*
+ * execve() is special because in case of success, we need to
+ * setup a null register window frame.
+@@ -178,11 +197,14 @@
+ DO_SAVE_SWITCH_STACK
+ .body
+
++#ifdef XEN
++//#undef IA64_TASK_THREAD_KSP_OFFSET
++//#define IA64_TASK_THREAD_KSP_OFFSET 0x38
+ adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
+ movl r25=init_task
+ mov r27=IA64_KR(CURRENT_STACK)
+ adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
+- dep r20=0,in0,61,3 // physical address of "current"
++ dep r20=0,in0,50,14 // physical address of "current"
+ ;;
+ st8 [r22]=sp // save kernel stack pointer of old task
+ shr.u r26=r20,IA64_GRANULE_SHIFT
+@@ -194,6 +216,22 @@
+ (p6) cmp.eq p7,p6=r26,r27
+ (p6) br.cond.dpnt .map
+ ;;
++#else
++ adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
++ mov r27=IA64_KR(CURRENT_STACK)
++ dep r20=0,in0,61,3 // physical address of "current"
++ ;;
++ st8 [r22]=sp // save kernel stack pointer of old task
++ shr.u r26=r20,IA64_GRANULE_SHIFT
++ adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
++ ;;
++ /*
++ * If we've already mapped this task's page, we can skip doing it again.
++ */
++ cmp.eq p7,p6=r26,r27
++(p6) br.cond.dpnt .map
++ ;;
++#endif
+ .done:
+ (p6) ssm psr.ic // if we we had to map, renable the psr.ic bit FIRST!!!
+ ;;
+@@ -211,6 +249,16 @@
+ br.ret.sptk.many rp // boogie on out in new context
+
+ .map:
++#ifdef XEN
++ // avoid overlapping with kernel TR
++ movl r25=KERNEL_START
++ dep r23=0,in0,0,KERNEL_TR_PAGE_SHIFT
++ ;;
++ cmp.eq p7,p0=r25,r23
++ ;;
++(p7) mov IA64_KR(CURRENT_STACK)=r26 // remember last page we mapped...
++(p7) br.cond.sptk .done
++#endif
+ rsm psr.ic // interrupts (psr.i) are already disabled here
+ movl r25=PAGE_KERNEL
+ ;;
+@@ -367,7 +415,11 @@
+ * - b7 holds address to return to
+ * - must not touch r8-r11
+ */
++#ifdef XEN
++GLOBAL_ENTRY(load_switch_stack)
++#else
+ ENTRY(load_switch_stack)
++#endif
+ .prologue
+ .altrp b7
+
+@@ -595,6 +647,11 @@
+ */
+ br.call.sptk.many rp=ia64_invoke_schedule_tail
+ }
++#ifdef XEN
++ // new domains are cloned but not exec'ed so switch to user mode here
++ cmp.ne pKStk,pUStk=r0,r0
++ br.cond.spnt ia64_leave_kernel
++#else
+ .ret8:
+ adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
+ ;;
+@@ -603,6 +660,7 @@
+ mov r8=0
+ tbit.nz p6,p0=r2,TIF_SYSCALL_TRACE
+ (p6) br.cond.spnt .strace_check_retval
++#endif
+ ;; // added stop bits to prevent r8 dependency
+ END(ia64_ret_from_clone)
+ // fall through
+@@ -684,9 +742,14 @@
+ #endif /* CONFIG_PREEMPT */
+ adds r16=PT(LOADRS)+16,r12
+ adds r17=PT(AR_BSPSTORE)+16,r12
++#ifdef XEN
++ mov r31=r0
++ ;;
++#else
+ adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
+ ;;
+ (p6) ld4 r31=[r18] // load current_thread_info()->flags
++#endif
+ ld8 r19=[r16],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
+ nop.i 0
+ ;;
+@@ -745,7 +808,11 @@
+ mov b7=r0 // clear b7
+ ;;
+ (pUStk) st1 [r14]=r3
++#ifdef XEN
++ movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
++#else
+ addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
++#endif
+ ;;
+ mov r16=ar.bsp // get existing backing store pointer
+ srlz.i // ensure interruption collection is off
+@@ -796,9 +863,18 @@
+ ;;
+ (p6) cmp.eq.unc p6,p0=r21,r0 // p6 <- p6 && (r21 == 0)
+ #endif /* CONFIG_PREEMPT */
++#ifdef XEN
++ alloc loc0=ar.pfs,0,1,1,0
++ adds out0=16,r12
++ ;;
++(p6) br.call.sptk.many b0=deliver_pending_interrupt
++ mov ar.pfs=loc0
++ mov r31=r0
++#else
+ adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
+ ;;
+ (p6) ld4 r31=[r17] // load current_thread_info()->flags
++#endif
+ adds r21=PT(PR)+16,r12
+ ;;
+
+@@ -912,7 +988,11 @@
+ shr.u r18=r19,16 // get byte size of existing "dirty" partition
+ ;;
+ mov r16=ar.bsp // get existing backing store pointer
++#ifdef XEN
++ movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
++#else
+ addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
++#endif
+ ;;
+ ld4 r17=[r17] // r17 = cpu_data->phys_stacked_size_p8
+ (pKStk) br.cond.dpnt skip_rbs_switch
+@@ -1264,6 +1344,7 @@
+ br.ret.sptk.many rp
+ END(unw_init_running)
+
++#ifndef XEN
+ .rodata
+ .align 8
+ .globl sys_call_table
+@@ -1526,3 +1607,4 @@
+ data8 sys_ni_syscall
+
+ .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
++#endif
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/gcc_intrin.h 2005-01-23 13:23:36.000000000 -0700
++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/gcc_intrin.h 2004-08-25 19:28:13.000000000 -0600
+@@ -92,6 +92,9 @@
+
+ #define ia64_hint_pause 0
+
++#ifdef XEN
++#define ia64_hint(mode) 0
++#else
+ #define ia64_hint(mode) \
+ ({ \
+ switch (mode) { \
+@@ -100,6 +103,7 @@
+ break; \
+ } \
+ })
++#endif
+
+
+ /* Integer values for mux1 instruction */
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/hardirq.h 2004-06-15 23:19:02.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/hardirq.h 2004-12-17 13:47:03.000000000 -0700
+@@ -81,10 +81,19 @@
+ */
+ #define in_irq() (hardirq_count())
+ #define in_softirq() (softirq_count())
++#ifdef XEN
+ #define in_interrupt() (irq_count())
++#else
++#define in_interrupt() 0 // FIXME LATER
++#endif
+
++#ifdef XEN
++#define hardirq_trylock(cpu) (!in_interrupt())
++#define hardirq_endlock(cpu) do { } while (0)
++#else
+ #define hardirq_trylock() (!in_interrupt())
+ #define hardirq_endlock() do { } while (0)
++#endif
+
+ #ifdef CONFIG_PREEMPT
+ # include <linux/smp_lock.h>
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/head.S 2005-01-23 13:23:36.000000000 -0700
++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/head.S 2004-12-17 13:47:03.000000000 -0700
+@@ -1,3 +1,8 @@
++#ifdef XEN
++#define console_print printf
++#define kernel_thread_helper 0
++#define sys_exit 0
++#endif
+ /*
+ * Here is where the ball gets rolling as far as the kernel is concerned.
+ * When control is transferred to _start, the bootload has already
+@@ -166,7 +171,11 @@
+ dep r18=0,r3,0,12
+ ;;
+ or r18=r17,r18
++#ifdef XEN
++ dep r2=-1,r3,50,14 // IMVA of task
++#else
+ dep r2=-1,r3,61,3 // IMVA of task
++#endif
+ ;;
+ mov r17=rr[r2]
+ ;;
+@@ -205,7 +214,11 @@
+ ;;
+ mov ar.rsc=0x3 // place RSE in eager mode
+
++#ifdef XEN
++(isBP) dep r28=-1,r28,50,14 // make address virtual
++#else
+ (isBP) dep r28=-1,r28,61,3 // make address virtual
++#endif
+ (isBP) movl r2=ia64_boot_param
+ ;;
+ (isBP) st8 [r2]=r28 // save the address of the boot param area passed by the bootloader
+@@ -238,14 +251,30 @@
+ br.call.sptk.many rp=sys_fw_init
+ .ret1:
+ #endif
++#ifdef XEN
++ alloc r2=ar.pfs,8,0,2,0
++ ;;
++#define fake_mbi_magic 0
++#define MULTIBOOT_INFO_SIZE 1024
++ .rodata
++fake_mbi:
++ .skip MULTIBOOT_INFO_SIZE
++ .previous
++ movl out0=fake_mbi
++ ;;
++ br.call.sptk.many rp=cmain
++#else
+ br.call.sptk.many rp=start_kernel
++#endif
+ .ret2: addl r3=@ltoff(halt_msg),gp
+ ;;
+ alloc r2=ar.pfs,8,0,2,0
+ ;;
+ ld8 out0=[r3]
+ br.call.sptk.many b0=console_print
++ ;;
+ self: br.sptk.many self // endless loop
++ ;;
+ END(_start)
+
+ GLOBAL_ENTRY(ia64_save_debug_regs)
+@@ -781,8 +810,13 @@
+ movl r18=KERNEL_START
+ dep r3=0,r3,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
+ dep r14=0,r14,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
++#ifdef XEN
++ dep r17=-1,r17,50,14
++ dep sp=-1,sp,50,14
++#else
+ dep r17=-1,r17,61,3
+ dep sp=-1,sp,61,3
++#endif
+ ;;
+ or r3=r3,r18
+ or r14=r14,r18
+@@ -838,7 +872,12 @@
+ * intermediate precision so that we can produce a full 64-bit result.
+ */
+ GLOBAL_ENTRY(sched_clock)
++#ifdef XEN
++ break 0;; // FIX IA64_CPUINFO_NSEC_PER_CYC_OFFSET
++ //movl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET
++#else
+ addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
++#endif
+ mov.m r9=ar.itc // fetch cycle-counter (35 cyc)
+ ;;
+ ldf8 f8=[r8]
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/hp/sim/hpsim_irq.c 2004-06-15 23:20:26.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/hpsim_irq.c 2004-11-01 17:54:15.000000000 -0700
+@@ -9,7 +9,17 @@
+ #include <linux/kernel.h>
+ #include <linux/sched.h>
+ #include <linux/irq.h>
++#ifdef XEN
++#include <asm/hw_irq.h>
++#endif
+
++#if 1
++void __init
++hpsim_irq_init (void)
++{
++ printf("*** hpsim_irq_init called: NOT NEEDED?!?!?\n");
++}
++#else
+ static unsigned int
+ hpsim_irq_startup (unsigned int irq)
+ {
+@@ -19,6 +29,10 @@
+ static void
+ hpsim_irq_noop (unsigned int irq)
+ {
++#if 1
++printf("hpsim_irq_noop: irq=%d\n",irq);
++while(irq);
++#endif
+ }
+
+ static struct hw_interrupt_type irq_type_hp_sim = {
+@@ -44,3 +58,4 @@
+ idesc->handler = &irq_type_hp_sim;
+ }
+ }
++#endif
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/hp/sim/hpsim_ssc.h 2004-06-15 23:19:43.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/hpsim_ssc.h 2004-08-29 01:04:23.000000000 -0600
+@@ -33,4 +33,23 @@
+ */
+ extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
+
++#ifdef XEN
++/* Note: These are declared in linux/arch/ia64/hp/sim/simscsi.c but belong
++ * in linux/include/asm-ia64/hpsim_ssc.h, hence their addition here */
++#define SSC_OPEN 50
++#define SSC_CLOSE 51
++#define SSC_READ 52
++#define SSC_WRITE 53
++#define SSC_GET_COMPLETION 54
++#define SSC_WAIT_COMPLETION 55
++
++#define SSC_WRITE_ACCESS 2
++#define SSC_READ_ACCESS 1
++
++struct ssc_disk_req {
++ unsigned long addr;
++ unsigned long len;
++};
++#endif
++
+ #endif /* _IA64_PLATFORM_HPSIM_SSC_H */
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/hw_irq.h 2004-06-15 23:19:22.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/hw_irq.h 2004-08-27 09:07:38.000000000 -0600
+@@ -9,7 +9,9 @@
+ #include <linux/interrupt.h>
+ #include <linux/sched.h>
+ #include <linux/types.h>
++#ifndef XEN
+ #include <linux/profile.h>
++#endif
+
+ #include <asm/machvec.h>
+ #include <asm/ptrace.h>
+@@ -96,7 +98,11 @@
+ * Default implementations for the irq-descriptor API:
+ */
+
++#ifdef XEN
++#define _irq_desc irq_desc
++#else
+ extern irq_desc_t _irq_desc[NR_IRQS];
++#endif
+
+ #ifndef CONFIG_IA64_GENERIC
+ static inline irq_desc_t *
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/ide.h 2004-06-15 23:19:36.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/ide.h 2004-08-25 19:28:13.000000000 -0600
+@@ -64,6 +64,32 @@
+ #define ide_init_default_irq(base) ide_default_irq(base)
+ #endif
+
++#ifdef XEN
++// this is moved to linux/ide.h in newer versions of linux
++typedef union {
++ unsigned all : 8; /* all of the bits together */
++ struct {
++ unsigned head : 4; /* always zeros here */
++ unsigned unit : 1; /* drive select number, 0 or 1 */
++ unsigned bit5 : 1; /* always 1 */
++ unsigned lba : 1; /* using LBA instead of CHS */
++ unsigned bit7 : 1; /* always 1 */
++ } b;
++} select_t;
++
++typedef union {
++ unsigned all : 8; /* all of the bits together */
++ struct {
++ unsigned bit0 : 1;
++ unsigned nIEN : 1; /* device INTRQ to host */
++ unsigned SRST : 1; /* host soft reset bit */
++ unsigned bit3 : 1; /* ATA-2 thingy */
++ unsigned reserved456 : 3;
++ unsigned HOB : 1; /* 48-bit address ordering */
++ } b;
++} control_t;
++#endif
++
+ #include <asm-generic/ide_iops.h>
+
+ #endif /* __KERNEL__ */
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/init_task.c 2004-06-15 23:20:26.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/init_task.c 2004-08-27 00:06:35.000000000 -0600
+@@ -15,10 +15,12 @@
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+
++#ifndef XEN
+ static struct fs_struct init_fs = INIT_FS;
+ static struct files_struct init_files = INIT_FILES;
+ static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
++#endif
+ struct mm_struct init_mm = INIT_MM(init_mm);
+
+ EXPORT_SYMBOL(init_mm);
+@@ -33,13 +35,19 @@
+
+ union {
+ struct {
++#ifdef XEN
++ struct domain task;
++#else
+ struct task_struct task;
+ struct thread_info thread_info;
++#endif
+ } s;
+ unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
+ } init_task_mem asm ("init_task") __attribute__((section(".data.init_task"))) = {{
+ .task = INIT_TASK(init_task_mem.s.task),
++#ifndef XEN
+ .thread_info = INIT_THREAD_INFO(init_task_mem.s.task)
++#endif
+ }};
+
+ EXPORT_SYMBOL(init_task);
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/init_task.h 2004-06-15 23:18:57.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/linux/init_task.h 2004-11-15 17:06:20.000000000 -0700
+@@ -31,6 +31,18 @@
+ .max_reqs = ~0U, \
+ }
+
++#ifdef XEN
++#define INIT_MM(name) \
++{ \
++ .mm_rb = RB_ROOT, \
++ .pgd = swapper_pg_dir, \
++ .mm_users = ATOMIC_INIT(2), \
++ .mm_count = ATOMIC_INIT(1), \
++ .page_table_lock = SPIN_LOCK_UNLOCKED, \
++ .mmlist = LIST_HEAD_INIT(name.mmlist), \
++ .cpu_vm_mask = CPU_MASK_ALL, \
++}
++#else
+ #define INIT_MM(name) \
+ { \
+ .mm_rb = RB_ROOT, \
+@@ -43,6 +55,7 @@
+ .cpu_vm_mask = CPU_MASK_ALL, \
+ .default_kioctx = INIT_KIOCTX(name.default_kioctx, name), \
+ }
++#endif
+
+ #define INIT_SIGNALS(sig) { \
+ .count = ATOMIC_INIT(1), \
+@@ -64,6 +77,15 @@
+ * INIT_TASK is used to set up the first task table, touch at
+ * your own risk!. Base=0, limit=0x1fffff (=2MB)
+ */
++#ifdef XEN
++#define INIT_TASK(tsk) \
++{ \
++ /*processor: 0,*/ \
++ /*id: IDLE_DOMAIN_ID,*/ \
++ /*flags: 1<<DF_IDLETASK,*/ \
++ refcnt: ATOMIC_INIT(1) \
++}
++#else
+ #define INIT_TASK(tsk) \
+ { \
+ .state = 0, \
+@@ -113,6 +135,7 @@
+ .switch_lock = SPIN_LOCK_UNLOCKED, \
+ .journal_info = NULL, \
+ }
++#endif
+
+
+
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/interrupt.h 2004-06-15 23:19:29.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/linux/interrupt.h 2004-08-25 19:28:13.000000000 -0600
+@@ -32,6 +32,7 @@
+ #define IRQ_HANDLED (1)
+ #define IRQ_RETVAL(x) ((x) != 0)
+
++#ifndef XEN
+ struct irqaction {
+ irqreturn_t (*handler)(int, void *, struct pt_regs *);
+ unsigned long flags;
+@@ -46,6 +47,7 @@
+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
+ unsigned long, const char *, void *);
+ extern void free_irq(unsigned int, void *);
++#endif
+
+ /*
+ * Temporary defines for UP kernels, until all code gets fixed.
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/io.h 2004-06-15 23:18:57.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/io.h 2004-11-05 16:53:36.000000000 -0700
+@@ -23,7 +23,11 @@
+ #define __SLOW_DOWN_IO do { } while (0)
+ #define SLOW_DOWN_IO do { } while (0)
+
++#ifdef XEN
++#define __IA64_UNCACHED_OFFSET 0xdffc000000000000 /* region 6 */
++#else
+ #define __IA64_UNCACHED_OFFSET 0xc000000000000000 /* region 6 */
++#endif
+
+ /*
+ * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/irq.h 2005-01-23 13:23:36.000000000 -0700
++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/irq.h 2004-08-25 19:28:13.000000000 -0600
+@@ -30,6 +30,15 @@
+ extern void enable_irq (unsigned int);
+ extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
+
++#ifdef XEN
++// dup'ed from signal.h to avoid changes to includes
++#define SA_NOPROFILE 0x02000000
++#define SA_SHIRQ 0x04000000
++#define SA_RESTART 0x10000000
++#define SA_INTERRUPT 0x20000000
++#define SA_SAMPLE_RANDOM SA_RESTART
++#endif
++
+ #ifdef CONFIG_SMP
+ extern void move_irq(int irq);
+ #else
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/irq_ia64.c 2004-06-15 23:19:13.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/irq_ia64.c 2004-11-22 17:41:59.000000000 -0700
+@@ -17,18 +17,26 @@
+ #include <linux/config.h>
+ #include <linux/module.h>
+
++#ifndef XEN
+ #include <linux/jiffies.h>
++#endif
+ #include <linux/errno.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/ioport.h>
++#ifndef XEN
+ #include <linux/kernel_stat.h>
++#endif
+ #include <linux/slab.h>
++#ifndef XEN
+ #include <linux/ptrace.h>
+ #include <linux/random.h> /* for rand_initialize_irq() */
+ #include <linux/signal.h>
++#endif
+ #include <linux/smp.h>
++#ifndef XEN
+ #include <linux/smp_lock.h>
++#endif
+ #include <linux/threads.h>
+
+ #include <asm/bitops.h>
+@@ -101,6 +109,24 @@
+ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
+ {
+ unsigned long saved_tpr;
++#if 0
++//FIXME: For debug only, can be removed
++ static char firstirq = 1;
++ static char firsttime[256];
++ static char firstpend[256];
++ if (firstirq) {
++ int i;
++ for (i=0;i<256;i++) firsttime[i] = 1;
++ for (i=0;i<256;i++) firstpend[i] = 1;
++ firstirq = 0;
++ }
++ if (firsttime[vector]) {
++ printf("**** (entry) First received int on vector=%d,itc=%lx\n",
++ (unsigned long) vector, ia64_get_itc());
++ firsttime[vector] = 0;
++ }
++#endif
++
+
+ #if IRQ_DEBUG
+ {
+@@ -145,6 +171,27 @@
+ ia64_setreg(_IA64_REG_CR_TPR, vector);
+ ia64_srlz_d();
+
++#ifdef XEN
++ if (vector != 0xef) {
++ extern void vcpu_pend_interrupt(void *, int);
++#if 0
++ if (firsttime[vector]) {
++ printf("**** (iterate) First received int on vector=%d,itc=%lx\n",
++ (unsigned long) vector, ia64_get_itc());
++ firsttime[vector] = 0;
++ }
++ if (firstpend[vector]) {
++ printf("**** First pended int on vector=%d,itc=%lx\n",
++ (unsigned long) vector,ia64_get_itc());
++ firstpend[vector] = 0;
++ }
++#endif
++ //FIXME: TEMPORARY HACK!!!!
++ vcpu_pend_interrupt(dom0,vector);
++ domain_wake(dom0);
++ }
++ else
++#endif
+ do_IRQ(local_vector_to_irq(vector), regs);
+
+ /*
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/ivt.S 2004-06-15 23:18:59.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/ivt.S 2004-12-17 13:47:03.000000000 -0700
+@@ -1,3 +1,21 @@
++
++#ifdef XEN
++//#define CONFIG_DISABLE_VHPT // FIXME: change when VHPT is enabled??
++// these are all hacked out for now as the entire IVT
++// will eventually be replaced... just want to use it
++// for startup code to handle TLB misses
++//#define ia64_leave_kernel 0
++//#define ia64_ret_from_syscall 0
++//#define ia64_handle_irq 0
++//#define ia64_fault 0
++#define ia64_illegal_op_fault 0
++#define ia64_prepare_handle_unaligned 0
++#define ia64_bad_break 0
++#define ia64_trace_syscall 0
++#define sys_call_table 0
++#define sys_ni_syscall 0
++#include <asm/vhpt.h>
++#endif
+ /*
+ * arch/ia64/kernel/ivt.S
+ *
+@@ -76,6 +94,13 @@
+ mov r19=n;; /* prepare to save predicates */ \
+ br.sptk.many dispatch_to_fault_handler
+
++#ifdef XEN
++#define REFLECT(n) \
++ mov r31=pr; \
++ mov r19=n;; /* prepare to save predicates */ \
++ br.sptk.many dispatch_reflection
++#endif
++
+ .section .text.ivt,"ax"
+
+ .align 32768 // align on 32KB boundary
+@@ -213,6 +238,9 @@
+ // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
+ ENTRY(itlb_miss)
+ DBG_FAULT(1)
++#ifdef XEN
++ VHPT_CCHAIN_LOOKUP(itlb_miss,i)
++#endif
+ /*
+ * The ITLB handler accesses the L3 PTE via the virtually mapped linear
+ * page table. If a nested TLB miss occurs, we switch into physical
+@@ -257,6 +285,9 @@
+ // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
+ ENTRY(dtlb_miss)
+ DBG_FAULT(2)
++#ifdef XEN
++ VHPT_CCHAIN_LOOKUP(dtlb_miss,d)
++#endif
+ /*
+ * The DTLB handler accesses the L3 PTE via the virtually mapped linear
+ * page table. If a nested TLB miss occurs, we switch into physical
+@@ -301,6 +332,10 @@
+ // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
+ ENTRY(alt_itlb_miss)
+ DBG_FAULT(3)
++#ifdef XEN
++// I think this is superfluous, once all regions have VHPT enabled
++// VHPT_CCHAIN_LOOKUP(alt_itlb_miss,i)
++#endif
+ mov r16=cr.ifa // get address that caused the TLB miss
+ movl r17=PAGE_KERNEL
+ mov r21=cr.ipsr
+@@ -339,6 +374,10 @@
+ // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
+ ENTRY(alt_dtlb_miss)
+ DBG_FAULT(4)
++#ifdef XEN
++// I think this is superfluous, once all regions have VHPT enabled
++// VHPT_CCHAIN_LOOKUP(alt_dtlb_miss,d)
++#endif
+ mov r16=cr.ifa // get address that caused the TLB miss
+ movl r17=PAGE_KERNEL
+ mov r20=cr.isr
+@@ -368,6 +407,17 @@
+ cmp.ne p8,p0=r0,r23
+ (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
+ (p8) br.cond.spnt page_fault
++#ifdef XEN
++ ;;
++ // FIXME: inadequate test, this is where we test for Xen address
++ // note that 0xfffc (cached) and 0xdffc (uncached) addresses
++ // should be OK. (Though no I/O is done in Xen, EFI needs uncached
++ // addresses and some domain EFI calls are passed through)
++ tbit.nz p0,p8=r16,50
++(p8) br.cond.spnt page_fault
++//(p8) br.cond.spnt 0
++ ;;
++#endif
+
+ dep r21=-1,r21,IA64_PSR_ED_BIT,1
+ or r19=r19,r17 // insert PTE control bits into r19
+@@ -448,6 +498,9 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
+ ENTRY(ikey_miss)
++#ifdef XEN
++ REFLECT(6)
++#endif
+ DBG_FAULT(6)
+ FAULT(6)
+ END(ikey_miss)
+@@ -460,9 +513,16 @@
+ srlz.i
+ ;;
+ SAVE_MIN_WITH_COVER
++#ifdef XEN
++ alloc r15=ar.pfs,0,0,4,0
++ mov out0=cr.ifa
++ mov out1=cr.isr
++ mov out3=cr.itir
++#else
+ alloc r15=ar.pfs,0,0,3,0
+ mov out0=cr.ifa
+ mov out1=cr.isr
++#endif
+ adds r3=8,r2 // set up second base pointer
+ ;;
+ ssm psr.ic | PSR_DEFAULT_BITS
+@@ -483,6 +543,9 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
+ ENTRY(dkey_miss)
++#ifdef XEN
++ REFLECT(7)
++#endif
+ DBG_FAULT(7)
+ FAULT(7)
+ END(dkey_miss)
+@@ -491,6 +554,9 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
+ ENTRY(dirty_bit)
++#ifdef XEN
++ REFLECT(8)
++#endif
+ DBG_FAULT(8)
+ /*
+ * What we do here is to simply turn on the dirty bit in the PTE. We need to
+@@ -553,6 +619,9 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
+ ENTRY(iaccess_bit)
++#ifdef XEN
++ REFLECT(9)
++#endif
+ DBG_FAULT(9)
+ // Like Entry 8, except for instruction access
+ mov r16=cr.ifa // get the address that caused the fault
+@@ -618,6 +687,9 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
+ ENTRY(daccess_bit)
++#ifdef XEN
++ REFLECT(10)
++#endif
+ DBG_FAULT(10)
+ // Like Entry 8, except for data access
+ mov r16=cr.ifa // get the address that caused the fault
+@@ -686,6 +758,16 @@
+ * to prevent leaking bits from kernel to user level.
+ */
+ DBG_FAULT(11)
++#ifdef XEN
++ mov r16=cr.isr
++ mov r17=cr.iim
++ mov r31=pr
++ ;;
++	cmp.eq p7,p0=r0,r17			// is this a pseudo-cover?
++ // FIXME: may also need to check slot==2?
++(p7) br.sptk.many dispatch_privop_fault
++ br.sptk.many dispatch_break_fault
++#endif
+ mov r16=IA64_KR(CURRENT) // r16 = current task; 12 cycle read lat.
+ mov r17=cr.iim
+ mov r18=__IA64_BREAK_SYSCALL
+@@ -696,7 +778,9 @@
+ mov r27=ar.rsc
+ mov r26=ar.pfs
+ mov r28=cr.iip
++#ifndef XEN
+ mov r31=pr // prepare to save predicates
++#endif
+ mov r20=r1
+ ;;
+ adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
+@@ -792,6 +876,36 @@
+ DBG_FAULT(13)
+ FAULT(13)
+
++#ifdef XEN
++ // There is no particular reason for this code to be here, other than that
++ // there happens to be space here that would go unused otherwise. If this
++	// fault ever gets "unreserved", simply move the following code to a more
++ // suitable spot...
++
++ENTRY(dispatch_break_fault)
++ SAVE_MIN_WITH_COVER
++ ;;
++ alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
++ mov out0=cr.ifa
++ adds out1=16,sp
++ mov out2=cr.isr // FIXME: pity to make this slow access twice
++ mov out3=cr.iim // FIXME: pity to make this slow access twice
++
++ ssm psr.ic | PSR_DEFAULT_BITS
++ ;;
++ srlz.i // guarantee that interruption collection is on
++ ;;
++(p15) ssm psr.i // restore psr.i
++ adds r3=8,r2 // set up second base pointer
++ ;;
++ SAVE_REST
++ movl r14=ia64_leave_kernel
++ ;;
++ mov rp=r14
++ br.sptk.many ia64_prepare_handle_break
++END(dispatch_break_fault)
++#endif
++
+ .org ia64_ivt+0x3800
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x3800 Entry 14 (size 64 bundles) Reserved
+@@ -842,9 +956,11 @@
+ * - ar.fpsr: set to kernel settings
+ */
+ GLOBAL_ENTRY(ia64_syscall_setup)
++#ifndef XEN
+ #if PT(B6) != 0
+ # error This code assumes that b6 is the first field in pt_regs.
+ #endif
++#endif
+ st8 [r1]=r19 // save b6
+ add r16=PT(CR_IPSR),r1 // initialize first base pointer
+ add r17=PT(R11),r1 // initialize second base pointer
+@@ -974,6 +1090,37 @@
+ DBG_FAULT(16)
+ FAULT(16)
+
++#ifdef XEN
++ // There is no particular reason for this code to be here, other than that
++ // there happens to be space here that would go unused otherwise. If this
++	// fault ever gets "unreserved", simply move the following code to a more
++ // suitable spot...
++
++ENTRY(dispatch_privop_fault)
++ SAVE_MIN_WITH_COVER
++ ;;
++ alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
++ mov out0=cr.ifa
++ adds out1=16,sp
++ mov out2=cr.isr // FIXME: pity to make this slow access twice
++ mov out3=cr.itir
++
++ ssm psr.ic | PSR_DEFAULT_BITS
++ ;;
++ srlz.i // guarantee that interruption collection is on
++ ;;
++(p15) ssm psr.i // restore psr.i
++ adds r3=8,r2 // set up second base pointer
++ ;;
++ SAVE_REST
++ movl r14=ia64_leave_kernel
++ ;;
++ mov rp=r14
++ br.sptk.many ia64_prepare_handle_privop
++END(dispatch_privop_fault)
++#endif
++
++
+ .org ia64_ivt+0x4400
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x4400 Entry 17 (size 64 bundles) Reserved
+@@ -1090,6 +1237,9 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
+ ENTRY(page_not_present)
++#ifdef XEN
++ REFLECT(20)
++#endif
+ DBG_FAULT(20)
+ mov r16=cr.ifa
+ rsm psr.dt
+@@ -1110,6 +1260,9 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
+ ENTRY(key_permission)
++#ifdef XEN
++ REFLECT(21)
++#endif
+ DBG_FAULT(21)
+ mov r16=cr.ifa
+ rsm psr.dt
+@@ -1123,6 +1276,9 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
+ ENTRY(iaccess_rights)
++#ifdef XEN
++ REFLECT(22)
++#endif
+ DBG_FAULT(22)
+ mov r16=cr.ifa
+ rsm psr.dt
+@@ -1136,6 +1292,9 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
+ ENTRY(daccess_rights)
++#ifdef XEN
++ REFLECT(23)
++#endif
+ DBG_FAULT(23)
+ mov r16=cr.ifa
+ rsm psr.dt
+@@ -1153,8 +1312,13 @@
+ mov r16=cr.isr
+ mov r31=pr
+ ;;
++#ifdef XEN
++ cmp4.ge p6,p0=0x20,r16
++(p6) br.sptk.many dispatch_privop_fault
++#else
+ cmp4.eq p6,p0=0,r16
+ (p6) br.sptk.many dispatch_illegal_op_fault
++#endif
+ ;;
+ mov r19=24 // fault number
+ br.sptk.many dispatch_to_fault_handler
+@@ -1164,6 +1328,9 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
+ ENTRY(disabled_fp_reg)
++#ifdef XEN
++ REFLECT(25)
++#endif
+ DBG_FAULT(25)
+ rsm psr.dfh // ensure we can access fph
+ ;;
+@@ -1177,6 +1344,9 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
+ ENTRY(nat_consumption)
++#ifdef XEN
++ REFLECT(26)
++#endif
+ DBG_FAULT(26)
+ FAULT(26)
+ END(nat_consumption)
+@@ -1185,6 +1355,10 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
+ ENTRY(speculation_vector)
++#ifdef XEN
++ // this probably need not reflect...
++ REFLECT(27)
++#endif
+ DBG_FAULT(27)
+ /*
+ * A [f]chk.[as] instruction needs to take the branch to the recovery code but
+@@ -1228,6 +1402,9 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
+ ENTRY(debug_vector)
++#ifdef XEN
++ REFLECT(29)
++#endif
+ DBG_FAULT(29)
+ FAULT(29)
+ END(debug_vector)
+@@ -1236,6 +1413,9 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
+ ENTRY(unaligned_access)
++#ifdef XEN
++ REFLECT(30)
++#endif
+ DBG_FAULT(30)
+ mov r16=cr.ipsr
+ mov r31=pr // prepare to save predicates
+@@ -1247,6 +1427,9 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
+ ENTRY(unsupported_data_reference)
++#ifdef XEN
++ REFLECT(31)
++#endif
+ DBG_FAULT(31)
+ FAULT(31)
+ END(unsupported_data_reference)
+@@ -1255,6 +1438,9 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
+ ENTRY(floating_point_fault)
++#ifdef XEN
++ REFLECT(32)
++#endif
+ DBG_FAULT(32)
+ FAULT(32)
+ END(floating_point_fault)
+@@ -1263,6 +1449,9 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
+ ENTRY(floating_point_trap)
++#ifdef XEN
++ REFLECT(33)
++#endif
+ DBG_FAULT(33)
+ FAULT(33)
+ END(floating_point_trap)
+@@ -1271,6 +1460,9 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
+ ENTRY(lower_privilege_trap)
++#ifdef XEN
++ REFLECT(34)
++#endif
+ DBG_FAULT(34)
+ FAULT(34)
+ END(lower_privilege_trap)
+@@ -1279,6 +1471,9 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
+ ENTRY(taken_branch_trap)
++#ifdef XEN
++ REFLECT(35)
++#endif
+ DBG_FAULT(35)
+ FAULT(35)
+ END(taken_branch_trap)
+@@ -1287,6 +1482,9 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
+ ENTRY(single_step_trap)
++#ifdef XEN
++ REFLECT(36)
++#endif
+ DBG_FAULT(36)
+ FAULT(36)
+ END(single_step_trap)
+@@ -1343,6 +1541,9 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x6900 Entry 45 (size 16 bundles) IA-32 Exeception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
+ ENTRY(ia32_exception)
++#ifdef XEN
++ REFLECT(45)
++#endif
+ DBG_FAULT(45)
+ FAULT(45)
+ END(ia32_exception)
+@@ -1351,6 +1552,9 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
+ ENTRY(ia32_intercept)
++#ifdef XEN
++ REFLECT(46)
++#endif
+ DBG_FAULT(46)
+ #ifdef CONFIG_IA32_SUPPORT
+ mov r31=pr
+@@ -1381,6 +1585,9 @@
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
+ ENTRY(ia32_interrupt)
++#ifdef XEN
++ REFLECT(47)
++#endif
+ DBG_FAULT(47)
+ #ifdef CONFIG_IA32_SUPPORT
+ mov r31=pr
+@@ -1510,6 +1717,39 @@
+ DBG_FAULT(67)
+ FAULT(67)
+
++#ifdef XEN
++ .org ia64_ivt+0x8000
++ENTRY(dispatch_reflection)
++ /*
++ * Input:
++ * psr.ic: off
++ * r19: intr type (offset into ivt, see ia64_int.h)
++ * r31: contains saved predicates (pr)
++ */
++ SAVE_MIN_WITH_COVER_R19
++ alloc r14=ar.pfs,0,0,5,0
++ mov out4=r15
++ mov out0=cr.ifa
++ adds out1=16,sp
++ mov out2=cr.isr
++ mov out3=cr.iim
++// mov out3=cr.itir
++
++ ssm psr.ic | PSR_DEFAULT_BITS
++ ;;
++ srlz.i // guarantee that interruption collection is on
++ ;;
++(p15) ssm psr.i // restore psr.i
++ adds r3=8,r2 // set up second base pointer
++ ;;
++ SAVE_REST
++ movl r14=ia64_leave_kernel
++ ;;
++ mov rp=r14
++ br.sptk.many ia64_prepare_handle_reflection
++END(dispatch_reflection)
++#endif
++
+ #ifdef CONFIG_IA32_SUPPORT
+
+ /*
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/kregs.h 2004-06-15 23:19:01.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/kregs.h 2004-09-17 18:27:22.000000000 -0600
+@@ -30,6 +30,10 @@
+ #define IA64_TR_PALCODE 1 /* itr1: maps PALcode as required by EFI */
+ #define IA64_TR_PERCPU_DATA 1 /* dtr1: percpu data */
+ #define IA64_TR_CURRENT_STACK 2 /* dtr2: maps kernel's memory- & register-stacks */
++#ifdef XEN
++#define IA64_TR_SHARED_INFO 3 /* dtr3: page shared with domain */
++#define IA64_TR_VHPT 4 /* dtr4: vhpt */
++#endif
+
+ /* Processor status register bits: */
+ #define IA64_PSR_BE_BIT 1
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/vmlinux.lds.S 2004-06-15 23:19:52.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/xen.lds.S 2004-08-25 19:28:12.000000000 -0600
+@@ -11,12 +11,14 @@
+ OUTPUT_FORMAT("elf64-ia64-little")
+ OUTPUT_ARCH(ia64)
+ ENTRY(phys_start)
++#ifndef XEN
+ jiffies = jiffies_64;
+ PHDRS {
+ code PT_LOAD;
+ percpu PT_LOAD;
+ data PT_LOAD;
+ }
++#endif
+ SECTIONS
+ {
+ /* Sections to be discarded */
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/time.h 2004-06-15 23:19:37.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/xen/linuxtime.h 2004-11-15 17:42:04.000000000 -0700
+@@ -1,6 +1,11 @@
+ #ifndef _LINUX_TIME_H
+ #define _LINUX_TIME_H
+
++#ifdef XEN
++typedef s64 time_t;
++typedef s64 suseconds_t;
++#endif
++
+ #include <asm/param.h>
+ #include <linux/types.h>
+
+@@ -25,7 +30,9 @@
+ #ifdef __KERNEL__
+
+ #include <linux/spinlock.h>
++#ifndef XEN
+ #include <linux/seqlock.h>
++#endif
+ #include <linux/timex.h>
+ #include <asm/div64.h>
+ #ifndef div_long_long_rem
+@@ -322,7 +329,9 @@
+
+ extern struct timespec xtime;
+ extern struct timespec wall_to_monotonic;
++#ifndef XEN
+ extern seqlock_t xtime_lock;
++#endif
+
+ static inline unsigned long get_seconds(void)
+ {
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/minstate.h 2004-06-15 23:19:52.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/minstate.h 2004-12-15 16:36:00.000000000 -0700
+@@ -3,6 +3,11 @@
+ #include <asm/cache.h>
+
+ #include "entry.h"
++#ifdef XEN
++//this can be removed when offsets.h is properly generated
++#undef IA64_TASK_THREAD_ON_USTACK_OFFSET
++#define IA64_TASK_THREAD_ON_USTACK_OFFSET 0x34
++#endif
+
+ /*
+ * For ivt.s we want to access the stack virtually so we don't have to disable translation
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/mm/bootmem.c 2004-06-15 23:19:09.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/mm_bootmem.c 2004-12-17 13:47:03.000000000 -0700
+@@ -10,7 +10,9 @@
+ */
+
+ #include <linux/mm.h>
++#ifndef XEN
+ #include <linux/kernel_stat.h>
++#endif
+ #include <linux/swap.h>
+ #include <linux/interrupt.h>
+ #include <linux/init.h>
+@@ -55,6 +57,9 @@
+ bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT);
+ bdata->node_boot_start = (start << PAGE_SHIFT);
+ bdata->node_low_pfn = end;
++#ifdef XEN
++//printk("init_bootmem_core: mapstart=%lx,start=%lx,end=%lx,bdata->node_bootmem_map=%lx,bdata->node_boot_start=%lx,bdata->node_low_pfn=%lx\n",mapstart,start,end,bdata->node_bootmem_map,bdata->node_boot_start,bdata->node_low_pfn);
++#endif
+
+ /*
+ * Initially all pages are reserved - setup_arch() has to
+@@ -146,6 +151,9 @@
+ unsigned long i, start = 0, incr, eidx;
+ void *ret;
+
++#ifdef XEN
++//printf("__alloc_bootmem_core(%lx,%lx,%lx,%lx) called\n",bdata,size,align,goal);
++#endif
+ if(!size) {
+ printk("__alloc_bootmem_core(): zero-sized request\n");
+ BUG();
+@@ -153,6 +161,9 @@
+ BUG_ON(align & (align-1));
+
+ eidx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
++#ifdef XEN
++//printf("__alloc_bootmem_core: eidx=%lx\n",eidx);
++#endif
+ offset = 0;
+ if (align &&
+ (bdata->node_boot_start & (align - 1UL)) != 0)
+@@ -182,6 +193,9 @@
+ unsigned long j;
+ i = find_next_zero_bit(bdata->node_bootmem_map, eidx, i);
+ i = ALIGN(i, incr);
++#ifdef XEN
++//if (i >= eidx) goto fail_block;
++#endif
+ if (test_bit(i, bdata->node_bootmem_map))
+ continue;
+ for (j = i + 1; j < i + areasize; ++j) {
+@@ -203,6 +217,9 @@
+ return NULL;
+
+ found:
++#ifdef XEN
++//printf("__alloc_bootmem_core: start=%lx\n",start);
++#endif
+ bdata->last_success = start << PAGE_SHIFT;
+ BUG_ON(start >= eidx);
+
+@@ -262,6 +279,9 @@
+ page = virt_to_page(phys_to_virt(bdata->node_boot_start));
+ idx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
+ map = bdata->node_bootmem_map;
++#ifdef XEN
++//printk("free_all_bootmem_core: bdata=%lx, bdata->node_boot_start=%lx, bdata->node_low_pfn=%lx, bdata->node_bootmem_map=%lx\n",bdata,bdata->node_boot_start,bdata->node_low_pfn,bdata->node_bootmem_map);
++#endif
+ for (i = 0; i < idx; ) {
+ unsigned long v = ~map[i / BITS_PER_LONG];
+ if (v) {
+@@ -285,6 +305,9 @@
+ * Now free the allocator bitmap itself, it's not
+ * needed anymore:
+ */
++#ifdef XEN
++//printk("About to free the allocator bitmap itself\n");
++#endif
+ page = virt_to_page(bdata->node_bootmem_map);
+ count = 0;
+ for (i = 0; i < ((bdata->node_low_pfn-(bdata->node_boot_start >> PAGE_SHIFT))/8 + PAGE_SIZE-1)/PAGE_SIZE; i++,page++) {
+@@ -327,6 +350,9 @@
+ return(init_bootmem_core(&contig_page_data, start, 0, pages));
+ }
+
++#ifdef XEN
++#undef reserve_bootmem
++#endif
+ #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
+ void __init reserve_bootmem (unsigned long addr, unsigned long size)
+ {
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/mm/contig.c 2004-06-15 23:19:12.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/mm_contig.c 2004-10-05 18:09:45.000000000 -0600
+@@ -15,11 +15,23 @@
+ * memory.
+ */
+ #include <linux/config.h>
++#ifdef XEN
++#include <xen/sched.h>
++#endif
+ #include <linux/bootmem.h>
+ #include <linux/efi.h>
+ #include <linux/mm.h>
+ #include <linux/swap.h>
+
++#ifdef XEN
++#undef reserve_bootmem
++unsigned long max_mapnr;
++unsigned long num_physpages;
++extern struct page *zero_page_memmap_ptr;
++struct page *mem_map;
++#define MAX_DMA_ADDRESS ~0UL // FIXME???
++#endif
++
+ #include <asm/meminit.h>
+ #include <asm/pgalloc.h>
+ #include <asm/pgtable.h>
+@@ -80,6 +92,9 @@
+ {
+ unsigned long *max_pfnp = arg, pfn;
+
++#ifdef XEN
++//printf("find_max_pfn: start=%lx, end=%lx, *arg=%lx\n",start,end,*(unsigned long *)arg);
++#endif
+ pfn = (PAGE_ALIGN(end - 1) - PAGE_OFFSET) >> PAGE_SHIFT;
+ if (pfn > *max_pfnp)
+ *max_pfnp = pfn;
+@@ -149,6 +164,9 @@
+ /* first find highest page frame number */
+ max_pfn = 0;
+ efi_memmap_walk(find_max_pfn, &max_pfn);
++#ifdef XEN
++//printf("find_memory: efi_memmap_walk returns max_pfn=%lx\n",max_pfn);
++#endif
+
+ /* how many bytes to cover all the pages */
+ bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
+@@ -242,6 +260,9 @@
+ efi_memmap_walk(count_pages, &num_physpages);
+
+ max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
++#ifdef XEN
++//printf("paging_init: num_physpages=%lx, max_dma=%lx\n",num_physpages,max_dma);
++#endif
+
+ #ifdef CONFIG_VIRTUAL_MEM_MAP
+ memset(zholes_size, 0, sizeof(zholes_size));
+@@ -265,7 +286,13 @@
+
+ max_gap = 0;
+ efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
++#ifdef XEN
++//printf("paging_init: max_gap=%lx\n",max_gap);
++#endif
+ if (max_gap < LARGE_GAP) {
++#ifdef XEN
++//printf("paging_init: no large gap\n");
++#endif
+ vmem_map = (struct page *) 0;
+ free_area_init_node(0, &contig_page_data, NULL, zones_size, 0,
+ zholes_size);
+@@ -274,6 +301,9 @@
+ unsigned long map_size;
+
+ /* allocate virtual_mem_map */
++#ifdef XEN
++//printf("paging_init: large gap, allocating virtual_mem_map\n");
++#endif
+
+ map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+ vmalloc_end -= map_size;
+@@ -293,6 +323,10 @@
+ zones_size[ZONE_DMA] = max_dma;
+ zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
+ }
++#ifdef XEN
++//printf("paging_init: zones_size[ZONE_DMA]=%lx, zones_size[ZONE_NORMAL]=%lx, max_low_pfn=%lx\n",
++//zones_size[ZONE_DMA],zones_size[ZONE_NORMAL],max_low_pfn);
++#endif
+ free_area_init(zones_size);
+ #endif /* !CONFIG_VIRTUAL_MEM_MAP */
+ zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/mmzone.h 2004-06-15 23:19:36.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/linux/mmzone.h 2004-08-25 19:28:13.000000000 -0600
+@@ -185,7 +185,11 @@
+ char *name;
+ unsigned long spanned_pages; /* total size, including holes */
+ unsigned long present_pages; /* amount of memory (excluding holes) */
++#ifdef XEN
++};
++#else
+ } ____cacheline_maxaligned_in_smp;
++#endif
+
+
+ /*
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/page.h 2004-06-15 23:18:58.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/page.h 2004-12-17 13:47:03.000000000 -0700
+@@ -84,7 +84,11 @@
+ #endif
+
+ #ifndef CONFIG_DISCONTIGMEM
++#ifdef XEN
++#define pfn_valid(pfn) (0)
++#else
+ #define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
++#endif
+ #define page_to_pfn(page) ((unsigned long) (page - mem_map))
+ #define pfn_to_page(pfn) (mem_map + (pfn))
+ #endif /* CONFIG_DISCONTIGMEM */
+@@ -107,8 +111,25 @@
+ * expressed in this way to ensure they result in a single "dep"
+ * instruction.
+ */
++#ifdef XEN
++typedef union xen_va {
++ struct {
++ unsigned long off : 50;
++ unsigned long reg : 14;
++ } f;
++ unsigned long l;
++ void *p;
++} xen_va;
++
++// xen/drivers/console.c uses __va in a declaration (should be fixed!)
++#define __pa(x) ({xen_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
++#define __va(x) ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
++//# define __pa(x) ((unsigned long)(((unsigned long)x) - PAGE_OFFSET))
++//# define __va(x) ((void *)((char *)(x) + PAGE_OFFSET))
++#else
+ #define __pa(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
+ #define __va(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
++#endif
+
+ #define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
+ #define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;})
+@@ -180,11 +201,19 @@
+ # define __pgprot(x) (x)
+ #endif /* !STRICT_MM_TYPECHECKS */
+
++#ifdef XEN
++#define PAGE_OFFSET 0xfffc000000000000
++#else
+ #define PAGE_OFFSET 0xe000000000000000
++#endif
+
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | \
+ (((current->thread.flags & IA64_THREAD_XSTACK) != 0) \
+ ? VM_EXEC : 0))
+
++#ifdef XEN
++#define __flush_tlb() do {} while(0);
++#endif
++
+ #endif /* _ASM_IA64_PAGE_H */
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/mm/page_alloc.c 2004-06-15 23:18:57.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/page_alloc.c 2004-12-17 13:47:03.000000000 -0700
+@@ -19,20 +19,28 @@
+ #include <linux/mm.h>
+ #include <linux/swap.h>
+ #include <linux/interrupt.h>
++#ifndef XEN
+ #include <linux/pagemap.h>
++#endif
+ #include <linux/bootmem.h>
+ #include <linux/compiler.h>
+ #include <linux/module.h>
++#ifndef XEN
+ #include <linux/suspend.h>
+ #include <linux/pagevec.h>
+ #include <linux/blkdev.h>
++#endif
+ #include <linux/slab.h>
++#ifndef XEN
+ #include <linux/notifier.h>
++#endif
+ #include <linux/topology.h>
++#ifndef XEN
+ #include <linux/sysctl.h>
+ #include <linux/cpu.h>
+
+ #include <asm/tlbflush.h>
++#endif
+
+ DECLARE_BITMAP(node_online_map, MAX_NUMNODES);
+ struct pglist_data *pgdat_list;
+@@ -71,6 +79,9 @@
+
+ static void bad_page(const char *function, struct page *page)
+ {
++#ifdef XEN
++printk("bad_page: called but disabled\n");
++#else
+ printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n",
+ function, current->comm, page);
+ printk(KERN_EMERG "flags:0x%08lx mapping:%p mapcount:%d count:%d\n",
+@@ -91,6 +102,7 @@
+ set_page_count(page, 0);
+ page->mapping = NULL;
+ page->mapcount = 0;
++#endif
+ }
+
+ #ifndef CONFIG_HUGETLB_PAGE
+@@ -218,6 +230,7 @@
+
+ static inline void free_pages_check(const char *function, struct page *page)
+ {
++#ifndef XEN
+ if ( page_mapped(page) ||
+ page->mapping != NULL ||
+ page_count(page) != 0 ||
+@@ -233,6 +246,7 @@
+ 1 << PG_swapcache |
+ 1 << PG_writeback )))
+ bad_page(function, page);
++#endif
+ if (PageDirty(page))
+ ClearPageDirty(page);
+ }
+@@ -276,6 +290,9 @@
+
+ void __free_pages_ok(struct page *page, unsigned int order)
+ {
++#ifdef XEN
++printk("__free_pages_ok: called but disabled\n");
++#else
+ LIST_HEAD(list);
+ int i;
+
+@@ -285,6 +302,7 @@
+ list_add(&page->lru, &list);
+ kernel_map_pages(page, 1<<order, 0);
+ free_pages_bulk(page_zone(page), 1, &list, order);
++#endif
+ }
+
+ #define MARK_USED(index, order, area) \
+@@ -330,6 +348,7 @@
+ */
+ static void prep_new_page(struct page *page, int order)
+ {
++#ifndef XEN
+ if (page->mapping || page_mapped(page) ||
+ (page->flags & (
+ 1 << PG_private |
+@@ -343,11 +362,14 @@
+ 1 << PG_swapcache |
+ 1 << PG_writeback )))
+ bad_page(__FUNCTION__, page);
++#endif
+
+ page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
+ 1 << PG_referenced | 1 << PG_arch_1 |
+ 1 << PG_checked | 1 << PG_mappedtodisk);
++#ifndef XEN
+ page->private = 0;
++#endif
+ set_page_refs(page, order);
+ }
+
+@@ -590,13 +612,17 @@
+ unsigned long min;
+ struct zone **zones;
+ struct page *page;
++#ifndef XEN
+ struct reclaim_state reclaim_state;
++#endif
+ struct task_struct *p = current;
+ int i;
+ int alloc_type;
+ int do_retry;
+
++#ifndef XEN
+ might_sleep_if(wait);
++#endif
+
+ zones = zonelist->zones; /* the list of zones suitable for gfp_mask */
+ if (zones[0] == NULL) /* no zones in the zonelist */
+@@ -610,12 +636,14 @@
+
+ min = (1<<order) + z->protection[alloc_type];
+
++#ifndef XEN
+ /*
+ * We let real-time tasks dip their real-time paws a little
+ * deeper into reserves.
+ */
+ if (rt_task(p))
+ min -= z->pages_low >> 1;
++#endif
+
+ if (z->free_pages >= min ||
+ (!wait && z->free_pages >= z->pages_high)) {
+@@ -627,9 +655,11 @@
+ }
+ }
+
++#ifndef XEN
+ /* we're somewhat low on memory, failed to find what we needed */
+ for (i = 0; zones[i] != NULL; i++)
+ wakeup_kswapd(zones[i]);
++#endif
+
+ /* Go through the zonelist again, taking __GFP_HIGH into account */
+ for (i = 0; zones[i] != NULL; i++) {
+@@ -639,8 +669,10 @@
+
+ if (gfp_mask & __GFP_HIGH)
+ min -= z->pages_low >> 2;
++#ifndef XEN
+ if (rt_task(p))
+ min -= z->pages_low >> 1;
++#endif
+
+ if (z->free_pages >= min ||
+ (!wait && z->free_pages >= z->pages_high)) {
+@@ -654,6 +686,7 @@
+
+ /* here we're in the low on memory slow path */
+
++#ifndef XEN
+ rebalance:
+ if ((p->flags & (PF_MEMALLOC | PF_MEMDIE)) && !in_interrupt()) {
+ /* go through the zonelist yet again, ignoring mins */
+@@ -681,6 +714,7 @@
+
+ p->reclaim_state = NULL;
+ p->flags &= ~PF_MEMALLOC;
++#endif
+
+ /* go through the zonelist yet one more time */
+ for (i = 0; zones[i] != NULL; i++) {
+@@ -698,6 +732,11 @@
+ }
+ }
+
++#ifdef XEN
++printk(KERN_WARNING "%s: page allocation failure."
++ " order:%d, mode:0x%x\n",
++ "(xen tasks have no comm)", order, gfp_mask);
++#else
+ /*
+ * Don't let big-order allocations loop unless the caller explicitly
+ * requests that. Wait for some write requests to complete then retry.
+@@ -724,6 +763,7 @@
+ p->comm, order, gfp_mask);
+ dump_stack();
+ }
++#endif
+ return NULL;
+ got_pg:
+ kernel_map_pages(page, 1 << order, 1);
+@@ -808,6 +848,7 @@
+
+ EXPORT_SYMBOL(get_zeroed_page);
+
++#ifndef XEN
+ void __pagevec_free(struct pagevec *pvec)
+ {
+ int i = pagevec_count(pvec);
+@@ -815,10 +856,15 @@
+ while (--i >= 0)
+ free_hot_cold_page(pvec->pages[i], pvec->cold);
+ }
++#endif
+
+ fastcall void __free_pages(struct page *page, unsigned int order)
+ {
++#ifdef XEN
++ if (!PageReserved(page)) {
++#else
+ if (!PageReserved(page) && put_page_testzero(page)) {
++#endif
+ if (order == 0)
+ free_hot_page(page);
+ else
+@@ -914,6 +960,13 @@
+ return nr_free_zone_pages(GFP_HIGHUSER & GFP_ZONEMASK);
+ }
+
++#ifdef XEN
++unsigned int nr_free_highpages (void)
++{
++printf("nr_free_highpages: called but not implemented\n");
++}
++#endif
++
+ #ifdef CONFIG_HIGHMEM
+ unsigned int nr_free_highpages (void)
+ {
+@@ -1022,6 +1075,7 @@
+
+ void si_meminfo(struct sysinfo *val)
+ {
++#ifndef XEN
+ val->totalram = totalram_pages;
+ val->sharedram = 0;
+ val->freeram = nr_free_pages();
+@@ -1034,6 +1088,7 @@
+ val->freehigh = 0;
+ #endif
+ val->mem_unit = PAGE_SIZE;
++#endif
+ }
+
+ EXPORT_SYMBOL(si_meminfo);
+@@ -1165,7 +1220,9 @@
+ printk("= %lukB\n", K(total));
+ }
+
++#ifndef XEN
+ show_swap_cache_info();
++#endif
+ }
+
+ /*
+@@ -1530,6 +1587,9 @@
+ zone->wait_table_size = wait_table_size(size);
+ zone->wait_table_bits =
+ wait_table_bits(zone->wait_table_size);
++#ifdef XEN
++//printf("free_area_init_core-1: calling alloc_bootmem_node(%lx,%lx)\n",pgdat,zone->wait_table_size * sizeof(wait_queue_head_t));
++#endif
+ zone->wait_table = (wait_queue_head_t *)
+ alloc_bootmem_node(pgdat, zone->wait_table_size
+ * sizeof(wait_queue_head_t));
+@@ -1584,6 +1644,9 @@
+ */
+ bitmap_size = (size-1) >> (i+4);
+ bitmap_size = LONG_ALIGN(bitmap_size+1);
++#ifdef XEN
++//printf("free_area_init_core-2: calling alloc_bootmem_node(%lx,%lx)\n",pgdat, bitmap_size);
++#endif
+ zone->free_area[i].map =
+ (unsigned long *) alloc_bootmem_node(pgdat, bitmap_size);
+ }
+@@ -1601,6 +1664,9 @@
+ calculate_zone_totalpages(pgdat, zones_size, zholes_size);
+ if (!node_mem_map) {
+ size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
++#ifdef XEN
++//printf("free_area_init_node: calling alloc_bootmem_node(%lx,%lx)\n",pgdat,size);
++#endif
+ node_mem_map = alloc_bootmem_node(pgdat, size);
+ }
+ pgdat->node_mem_map = node_mem_map;
+@@ -1784,6 +1850,7 @@
+
+ #endif /* CONFIG_PROC_FS */
+
++#ifndef XEN
+ #ifdef CONFIG_HOTPLUG_CPU
+ static int page_alloc_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+@@ -2011,3 +2078,4 @@
+ setup_per_zone_protection();
+ return 0;
+ }
++#endif
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/processor.h 2005-01-23 13:23:36.000000000 -0700
++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/processor.h 2004-08-25 19:28:13.000000000 -0600
+@@ -406,12 +406,16 @@
+ */
+
+ /* Return TRUE if task T owns the fph partition of the CPU we're running on. */
++#ifdef XEN
++#define ia64_is_local_fpu_owner(t) 0
++#else
+ #define ia64_is_local_fpu_owner(t) \
+ ({ \
+ struct task_struct *__ia64_islfo_task = (t); \
+ (__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id() \
+ && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER)); \
+ })
++#endif
+
+ /* Mark task T as owning the fph partition of the CPU we're running on. */
+ #define ia64_set_local_fpu_owner(t) do { \
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/sal.h 2004-06-15 23:20:04.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/sal.h 2004-10-27 13:55:23.000000000 -0600
+@@ -646,7 +646,23 @@
+ {
+ struct ia64_sal_retval isrv;
+
++//#ifdef XEN
++#if 0
++ unsigned long *x = (unsigned long *)ia64_sal;
++ unsigned long *inst = (unsigned long *)*x;
++ unsigned long __ia64_sc_flags;
++ struct ia64_fpreg __ia64_sc_fr[6];
++printf("ia64_sal_freq_base: about to save_scratch_fpregs\n");
++ ia64_save_scratch_fpregs(__ia64_sc_fr);
++ spin_lock_irqsave(&sal_lock, __ia64_sc_flags);
++printf("ia64_sal_freq_base: about to call, ia64_sal=%p, ia64_sal[0]=%p, ia64_sal[1]=%p\n",x,x[0],x[1]);
++printf("first inst=%p,%p\n",inst[0],inst[1]);
++ isrv = (*ia64_sal)(SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
++ spin_unlock_irqrestore(&sal_lock, __ia64_sc_flags);
++ ia64_load_scratch_fpregs(__ia64_sc_fr);
++#else
+ SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
++#endif
+ *ticks_per_second = isrv.v0;
+ *drift_info = isrv.v1;
+ return isrv.status;
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/setup.c 2004-06-15 23:18:58.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/setup.c 2004-11-11 17:08:30.000000000 -0700
+@@ -21,6 +21,9 @@
+ #include <linux/init.h>
+
+ #include <linux/acpi.h>
++#ifdef XEN
++#include <xen/sched.h>
++#endif
+ #include <linux/bootmem.h>
+ #include <linux/console.h>
+ #include <linux/delay.h>
+@@ -30,13 +33,17 @@
+ #include <linux/seq_file.h>
+ #include <linux/string.h>
+ #include <linux/threads.h>
++#ifndef XEN
+ #include <linux/tty.h>
+ #include <linux/serial.h>
+ #include <linux/serial_core.h>
++#endif
+ #include <linux/efi.h>
+ #include <linux/initrd.h>
+
++#ifndef XEN
+ #include <asm/ia32.h>
++#endif
+ #include <asm/machvec.h>
+ #include <asm/mca.h>
+ #include <asm/meminit.h>
+@@ -50,6 +57,11 @@
+ #include <asm/smp.h>
+ #include <asm/system.h>
+ #include <asm/unistd.h>
++#ifdef XEN
++#include <linux/mm.h>
++#include <asm/mmu_context.h>
++extern unsigned long loops_per_jiffy; // from linux/init/main.c
++#endif
+
+ #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
+ # error "struct cpuinfo_ia64 too big!"
+@@ -65,7 +77,9 @@
+ DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
+ unsigned long ia64_cycles_per_usec;
+ struct ia64_boot_param *ia64_boot_param;
++#ifndef XEN
+ struct screen_info screen_info;
++#endif
+
+ unsigned long ia64_max_cacheline_size;
+ unsigned long ia64_iobase; /* virtual address for I/O accesses */
+@@ -98,7 +112,6 @@
+ struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
+ int num_rsvd_regions;
+
+-
+ /*
+ * Filter incoming memory segments based on the primitive map created from the boot
+ * parameters. Segments contained in the map are removed from the memory ranges. A
+@@ -285,7 +298,9 @@
+ {
+ unw_init();
+
++#ifndef XEN
+ ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
++#endif
+
+ *cmdline_p = __va(ia64_boot_param->command_line);
+ strlcpy(saved_command_line, *cmdline_p, sizeof(saved_command_line));
+@@ -297,6 +312,10 @@
+ machvec_init(acpi_get_sysname());
+ #endif
+
++#ifdef XEN
++#undef CONFIG_ACPI_BOOT
++#endif
++
+ #ifdef CONFIG_ACPI_BOOT
+ /* Initialize the ACPI boot-time table parser */
+ acpi_table_init();
+@@ -413,6 +432,9 @@
+ sprintf(cp, " 0x%lx", mask);
+ }
+
++#ifdef XEN
++#define seq_printf(a,b...) printf(b)
++#endif
+ seq_printf(m,
+ "processor : %d\n"
+ "vendor : %s\n"
+@@ -667,6 +689,8 @@
+ void
+ check_bugs (void)
+ {
++#ifndef XEN
+ ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
+ (unsigned long) __end___mckinley_e9_bundles);
++#endif
+ }
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/mm/slab.c 2004-06-15 23:19:44.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/slab.c 2004-12-17 13:47:03.000000000 -0700
+@@ -86,15 +86,30 @@
+ #include <linux/init.h>
+ #include <linux/compiler.h>
+ #include <linux/seq_file.h>
++#ifndef XEN
+ #include <linux/notifier.h>
+ #include <linux/kallsyms.h>
+ #include <linux/cpu.h>
+ #include <linux/sysctl.h>
+ #include <linux/module.h>
++#endif
+
+ #include <asm/uaccess.h>
+ #include <asm/cacheflush.h>
++#ifndef XEN
+ #include <asm/tlbflush.h>
++#endif
++
++#ifdef XEN
++#define lock_cpu_hotplug() do { } while (0)
++#define unlock_cpu_hotplug() do { } while (0)
++#define might_sleep_if(x) do { } while (0)
++#define dump_stack() do { } while (0)
++#define start_cpu_timer(cpu) do { } while (0)
++static inline void __down(struct semaphore *sem) { }
++static inline void __up(struct semaphore *sem) { }
++static inline void might_sleep(void) { }
++#endif
+
+ /*
+ * DEBUG - 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
+@@ -530,7 +545,9 @@
+ FULL
+ } g_cpucache_up;
+
++#ifndef XEN
+ static DEFINE_PER_CPU(struct timer_list, reap_timers);
++#endif
+
+ static void reap_timer_fnc(unsigned long data);
+ static void free_block(kmem_cache_t* cachep, void** objpp, int len);
+@@ -588,6 +605,7 @@
+ * Add the CPU number into the expiry time to minimize the possibility of the
+ * CPUs getting into lockstep and contending for the global cache chain lock.
+ */
++#ifndef XEN
+ static void __devinit start_cpu_timer(int cpu)
+ {
+ struct timer_list *rt = &per_cpu(reap_timers, cpu);
+@@ -600,6 +618,7 @@
+ add_timer_on(rt, cpu);
+ }
+ }
++#endif
+
+ #ifdef CONFIG_HOTPLUG_CPU
+ static void stop_cpu_timer(int cpu)
+@@ -634,6 +653,7 @@
+ return nc;
+ }
+
++#ifndef XEN
+ static int __devinit cpuup_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *hcpu)
+@@ -693,6 +713,7 @@
+ }
+
+ static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
++#endif
+
+ /* Initialisation.
+ * Called after the gfp() functions have been enabled, and before smp_init().
+@@ -805,10 +826,14 @@
+ /* Done! */
+ g_cpucache_up = FULL;
+
++#ifdef XEN
++printk("kmem_cache_init: some parts commented out, ignored\n");
++#else
+ /* Register a cpu startup notifier callback
+ * that initializes ac_data for all new cpus
+ */
+ register_cpu_notifier(&cpucache_notifier);
++#endif
+
+
+ /* The reap timers are started later, with a module init call:
+@@ -886,8 +911,10 @@
+ page++;
+ }
+ sub_page_state(nr_slab, nr_freed);
++#ifndef XEN
+ if (current->reclaim_state)
+ current->reclaim_state->reclaimed_slab += nr_freed;
++#endif
+ free_pages((unsigned long)addr, cachep->gfporder);
+ if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
+ atomic_sub(1<<cachep->gfporder, &slab_reclaim_pages);
+@@ -1363,8 +1390,10 @@
+ + cachep->num;
+ }
+
++#ifndef XEN
+ cachep->lists.next_reap = jiffies + REAPTIMEOUT_LIST3 +
+ ((unsigned long)cachep)%REAPTIMEOUT_LIST3;
++#endif
+
+ /* Need the semaphore to access the chain. */
+ down(&cache_chain_sem);
+@@ -2237,8 +2266,10 @@
+
+ if (unlikely(addr < min_addr))
+ goto out;
++#ifndef XEN
+ if (unlikely(addr > (unsigned long)high_memory - size))
+ goto out;
++#endif
+ if (unlikely(addr & align_mask))
+ goto out;
+ if (unlikely(!kern_addr_valid(addr)))
+@@ -2769,6 +2800,7 @@
+ */
+ static void reap_timer_fnc(unsigned long cpu)
+ {
++#ifndef XEN
+ struct timer_list *rt = &__get_cpu_var(reap_timers);
+
+ /* CPU hotplug can drag us off cpu: don't run on wrong CPU */
+@@ -2776,6 +2808,7 @@
+ cache_reap();
+ mod_timer(rt, jiffies + REAPTIMEOUT_CPUC + cpu);
+ }
++#endif
+ }
+
+ #ifdef CONFIG_PROC_FS
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/slab.h 2004-06-15 23:20:26.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/slab.h 2004-08-25 19:28:13.000000000 -0600
+@@ -83,7 +83,11 @@
+ goto found; \
+ else \
+ i++;
++#ifdef XEN
++#include <linux/kmalloc_sizes.h>
++#else
+ #include "kmalloc_sizes.h"
++#endif
+ #undef CACHE
+ {
+ extern void __you_cannot_kmalloc_that_much(void);
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/system.h 2005-01-23 13:23:36.000000000 -0700
++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/system.h 2004-09-17 18:27:22.000000000 -0600
+@@ -24,8 +24,16 @@
+ * 0xa000000000000000+2*PERCPU_PAGE_SIZE
+ * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
+ */
++#ifdef XEN
++//#define KERNEL_START 0xfffc000100000000
++#define KERNEL_START 0xfffc000004000000
++#define PERCPU_ADDR 0xfffd000000000000-PERCPU_PAGE_SIZE
++#define SHAREDINFO_ADDR 0xfffd000000000000
++#define VHPT_ADDR 0xfffe000000000000
++#else
+ #define KERNEL_START 0xa000000100000000
+ #define PERCPU_ADDR (-PERCPU_PAGE_SIZE)
++#endif
+
+ #ifndef __ASSEMBLY__
+
+@@ -218,9 +226,13 @@
+ # define PERFMON_IS_SYSWIDE() (0)
+ #endif
+
++#ifdef XEN
++#define IA64_HAS_EXTRA_STATE(t) 0
++#else
+ #define IA64_HAS_EXTRA_STATE(t) \
+ ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \
+ || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
++#endif
+
+ #define __switch_to(prev,next,last) do { \
+ if (IA64_HAS_EXTRA_STATE(prev)) \
+@@ -249,6 +261,9 @@
+ #else
+ # define switch_to(prev,next,last) __switch_to(prev, next, last)
+ #endif
++#ifdef XEN
++#undef switch_to
++#endif
+
+ /*
+ * On IA-64, we don't want to hold the runqueue's lock during the low-level context-switch,
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/time.c 2004-06-15 23:19:01.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/time.c 2004-11-23 17:25:18.000000000 -0700
+@@ -10,16 +10,22 @@
+ */
+ #include <linux/config.h>
+
++#ifndef XEN
+ #include <linux/cpu.h>
++#endif
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
++#ifndef XEN
+ #include <linux/profile.h>
++#endif
+ #include <linux/sched.h>
+ #include <linux/time.h>
+ #include <linux/interrupt.h>
+ #include <linux/efi.h>
++#ifndef XEN
+ #include <linux/profile.h>
++#endif
+ #include <linux/timex.h>
+
+ #include <asm/machvec.h>
+@@ -29,6 +35,9 @@
+ #include <asm/sal.h>
+ #include <asm/sections.h>
+ #include <asm/system.h>
++#ifdef XEN
++#include <asm/ia64_int.h>
++#endif
+
+ extern unsigned long wall_jiffies;
+
+@@ -45,6 +54,59 @@
+
+ #endif
+
++#ifdef XEN
++volatile unsigned long last_nsec_offset;
++extern rwlock_t xtime_lock;
++unsigned long cpu_khz; /* Detected as we calibrate the TSC */
++static s_time_t stime_irq; /* System time at last 'time update' */
++
++static inline u64 get_time_delta(void)
++{
++ printf("get_time_delta: called, not implemented\n");
++ return 0;
++}
++
++s_time_t get_s_time(void)
++{
++ s_time_t now;
++ unsigned long flags;
++
++ read_lock_irqsave(&xtime_lock, flags);
++
++ now = stime_irq + get_time_delta();
++
++ /* Ensure that the returned system time is monotonically increasing. */
++ {
++ static s_time_t prev_now = 0;
++ if ( unlikely(now < prev_now) )
++ now = prev_now;
++ prev_now = now;
++ }
++
++ read_unlock_irqrestore(&xtime_lock, flags);
++
++ return now;
++}
++
++void update_dom_time(struct domain *d)
++{
++// FIXME: implement this?
++ printf("update_dom_time: called, not implemented, skipping\n");
++}
++
++/* Set clock to <secs,usecs> after 00:00:00 UTC, 1 January, 1970. */
++void do_settime(unsigned long secs, unsigned long usecs, u64 system_time_base)
++{
++// FIXME: Should this be do_settimeofday (from linux)???
++ printf("do_settime: called, not implemented, stopping\n");
++ dummy();
++}
++#endif
++
++#if 0 /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
++#endif /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
++
++#ifndef XEN
+ static void
+ itc_reset (void)
+ {
+@@ -80,12 +142,15 @@
+ return (elapsed_cycles*local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
+ }
+
++#ifndef XEN
+ static struct time_interpolator itc_interpolator = {
+ .get_offset = itc_get_offset,
+ .update = itc_update,
+ .reset = itc_reset
+ };
++#endif
+
++#ifndef XEN
+ int
+ do_settimeofday (struct timespec *tv)
+ {
+@@ -95,7 +160,9 @@
+ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+
++#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
+ write_seqlock_irq(&xtime_lock);
++#endif
+ {
+ /*
+ * This is revolting. We need to set "xtime" correctly. However, the value
+@@ -117,12 +184,15 @@
+ time_esterror = NTP_PHASE_LIMIT;
+ time_interpolator_reset();
+ }
++#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
+ write_sequnlock_irq(&xtime_lock);
++#endif
+ clock_was_set();
+ return 0;
+ }
+
+ EXPORT_SYMBOL(do_settimeofday);
++#endif
+
+ void
+ do_gettimeofday (struct timeval *tv)
+@@ -185,6 +255,7 @@
+ }
+
+ EXPORT_SYMBOL(do_gettimeofday);
++#endif
+
+ /*
+ * The profiling function is SMP safe. (nothing can mess
+@@ -195,6 +266,9 @@
+ static inline void
+ ia64_do_profile (struct pt_regs * regs)
+ {
++#ifdef XEN
++}
++#else
+ unsigned long ip, slot;
+ extern cpumask_t prof_cpu_mask;
+
+@@ -231,24 +305,88 @@
+ ip = prof_len-1;
+ atomic_inc((atomic_t *)&prof_buffer[ip]);
+ }
++#endif
++
++#ifdef XEN
++unsigned long domain0_ready = 0; // FIXME (see below)
++#define typecheck(a,b) 1
++/* FROM linux/include/linux/jiffies.h */
++/*
++ * These inlines deal with timer wrapping correctly. You are
++ * strongly encouraged to use them
++ * 1. Because people otherwise forget
++ * 2. Because if the timer wrap changes in future you won't have to
++ * alter your driver code.
++ *
++ * time_after(a,b) returns true if the time a is after time b.
++ *
++ * Do this with "<0" and ">=0" to only test the sign of the result. A
++ * good compiler would generate better code (and a really good compiler
++ * wouldn't care). Gcc is currently neither.
++ */
++#define time_after(a,b) \
++ (typecheck(unsigned long, a) && \
++ typecheck(unsigned long, b) && \
++ ((long)(b) - (long)(a) < 0))
++#define time_before(a,b) time_after(b,a)
++
++#define time_after_eq(a,b) \
++ (typecheck(unsigned long, a) && \
++ typecheck(unsigned long, b) && \
++ ((long)(a) - (long)(b) >= 0))
++#define time_before_eq(a,b) time_after_eq(b,a)
++#endif
+
+ static irqreturn_t
+ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
+ {
+ unsigned long new_itm;
+
++#ifndef XEN
+ if (unlikely(cpu_is_offline(smp_processor_id()))) {
+ return IRQ_HANDLED;
+ }
++#endif
++#ifdef XEN
++ if (current->domain == dom0) {
++ // FIXME: there's gotta be a better way of doing this...
++ // We have to ensure that domain0 is launched before we
++ // call vcpu_timer_expired on it
++ //domain0_ready = 1; // moved to xensetup.c
++ }
++ if (domain0_ready && vcpu_timer_expired(dom0->exec_domain[0])) {
++ vcpu_pend_timer(dom0->exec_domain[0]);
++ vcpu_set_next_timer(dom0->exec_domain[0]);
++ domain_wake(dom0->exec_domain[0]);
++ }
++ if (!is_idle_task(current->domain) && current->domain != dom0) {
++ if (vcpu_timer_expired(current)) {
++ vcpu_pend_timer(current);
++ // ensure another timer interrupt happens even if domain doesn't
++ vcpu_set_next_timer(current);
++ domain_wake(current);
++ }
++ }
++#endif
+
++#ifndef XEN
+ platform_timer_interrupt(irq, dev_id, regs);
++#endif
+
+ new_itm = local_cpu_data->itm_next;
+
+ if (!time_after(ia64_get_itc(), new_itm))
++#ifdef XEN
++ return;
++#else
+ printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
+ ia64_get_itc(), new_itm);
++#endif
+
++#ifdef XEN
++// printf("GOT TO HERE!!!!!!!!!!!\n");
++ //while(1);
++#endif
+ ia64_do_profile(regs);
+
+ while (1) {
+@@ -269,10 +407,16 @@
+ * another CPU. We need to avoid to SMP race by acquiring the
+ * xtime_lock.
+ */
++#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
+ write_seqlock(&xtime_lock);
++#endif
++#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
+ do_timer(regs);
++#endif
+ local_cpu_data->itm_next = new_itm;
++#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
+ write_sequnlock(&xtime_lock);
++#endif
+ } else
+ local_cpu_data->itm_next = new_itm;
+
+@@ -292,7 +436,12 @@
+ */
+ while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
+ new_itm += local_cpu_data->itm_delta;
++//#ifdef XEN
++// vcpu_set_next_timer(current);
++//#else
++//printf("***** timer_interrupt: Setting itm to %lx\n",new_itm);
+ ia64_set_itm(new_itm);
++//#endif
+ /* double check, in case we got hit by a (slow) PMI: */
+ } while (time_after_eq(ia64_get_itc(), new_itm));
+ return IRQ_HANDLED;
+@@ -307,6 +456,7 @@
+ int cpu = smp_processor_id();
+ unsigned long shift = 0, delta;
+
++printf("ia64_cpu_local_tick: about to call ia64_set_itv\n");
+ /* arrange for the cycle counter to generate a timer interrupt: */
+ ia64_set_itv(IA64_TIMER_VECTOR);
+
+@@ -320,6 +470,7 @@
+ shift = (2*(cpu - hi) + 1) * delta/hi/2;
+ }
+ local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
++printf("***** ia64_cpu_local_tick: Setting itm to %lx\n",local_cpu_data->itm_next);
+ ia64_set_itm(local_cpu_data->itm_next);
+ }
+
+@@ -335,6 +486,7 @@
+ * frequency and then a PAL call to determine the frequency ratio between the ITC
+ * and the base frequency.
+ */
++
+ status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
+ &platform_base_freq, &platform_base_drift);
+ if (status != 0) {
+@@ -384,9 +536,11 @@
+ + itc_freq/2)/itc_freq;
+
+ if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
++#ifndef XEN
+ itc_interpolator.frequency = local_cpu_data->itc_freq;
+ itc_interpolator.drift = itc_drift;
+ register_time_interpolator(&itc_interpolator);
++#endif
+ }
+
+ /* Setup the CPU local timer tick */
+@@ -395,7 +549,9 @@
+
+ static struct irqaction timer_irqaction = {
+ .handler = timer_interrupt,
++#ifndef XEN
+ .flags = SA_INTERRUPT,
++#endif
+ .name = "timer"
+ };
+
+@@ -403,12 +559,16 @@
+ time_init (void)
+ {
+ register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
++#ifndef XEN
+ efi_gettimeofday(&xtime);
++#endif
+ ia64_init_itm();
+
++#ifndef XEN
+ /*
+ * Initialize wall_to_monotonic such that adding it to xtime will yield zero, the
+ * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
+ */
+ set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
++#endif
+ }
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/mm/tlb.c 2004-06-15 23:19:43.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/tlb.c 2004-08-25 19:28:12.000000000 -0600
+@@ -21,7 +21,9 @@
+ #include <asm/mmu_context.h>
+ #include <asm/pgalloc.h>
+ #include <asm/pal.h>
++#ifndef XEN
+ #include <asm/tlbflush.h>
++#endif
+
+ static struct {
+ unsigned long mask; /* mask of supported purge page-sizes */
+@@ -43,6 +45,9 @@
+ void
+ wrap_mmu_context (struct mm_struct *mm)
+ {
++#ifdef XEN
++printf("wrap_mmu_context: called, not implemented\n");
++#else
+ unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
+ struct task_struct *tsk;
+ int i;
+@@ -83,6 +88,7 @@
+ put_cpu();
+ }
+ local_flush_tlb_all();
++#endif
+ }
+
+ void
+@@ -132,6 +138,9 @@
+ void
+ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end)
+ {
++#ifdef XEN
++printf("flush_tlb_range: called, not implemented\n");
++#else
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long size = end - start;
+ unsigned long nbits;
+@@ -163,6 +172,7 @@
+ # endif
+
+ ia64_srlz_i(); /* srlz.i implies srlz.d */
++#endif
+ }
+ EXPORT_SYMBOL(flush_tlb_range);
+
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/types.h 2004-06-15 23:19:01.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/types.h 2004-11-11 17:08:30.000000000 -0700
+@@ -1,5 +1,12 @@
+ #ifndef _ASM_IA64_TYPES_H
+ #define _ASM_IA64_TYPES_H
++#ifdef XEN
++#ifndef __ASSEMBLY__
++typedef unsigned long ssize_t;
++typedef unsigned long size_t;
++typedef long long loff_t;
++#endif
++#endif
+
+ /*
+ * This file is never included by application software unless explicitly requested (e.g.,
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/kernel/unaligned.c 2004-06-15 23:20:03.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/unaligned.c 2004-08-25 19:28:12.000000000 -0600
+@@ -15,8 +15,10 @@
+ */
+ #include <linux/kernel.h>
+ #include <linux/sched.h>
++#ifndef XEN
+ #include <linux/smp_lock.h>
+ #include <linux/tty.h>
++#endif
+
+ #include <asm/intrinsics.h>
+ #include <asm/processor.h>
+@@ -24,7 +26,16 @@
+ #include <asm/uaccess.h>
+ #include <asm/unaligned.h>
+
++#ifdef XEN
++#define ia64_peek(x...) printk("ia64_peek: called, not implemented\n")
++#define ia64_poke(x...) printk("ia64_poke: called, not implemented\n")
++#define ia64_sync_fph(x...) printk("ia64_sync_fph: called, not implemented\n")
++#define ia64_flush_fph(x...) printk("ia64_flush_fph: called, not implemented\n")
++#define die_if_kernel(x...) printk("die_if_kernel: called, not implemented\n")
++#define jiffies 0
++#else
+ extern void die_if_kernel(char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
++#endif
+
+ #undef DEBUG_UNALIGNED_TRAP
+
+@@ -437,7 +448,11 @@
+ }
+
+
++#ifdef XEN
++void
++#else
+ static void
++#endif
+ setreg (unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs)
+ {
+ struct switch_stack *sw = (struct switch_stack *) regs - 1;
+@@ -611,7 +626,11 @@
+ }
+
+
++#ifdef XEN
++void
++#else
+ static void
++#endif
+ getreg (unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs)
+ {
+ struct switch_stack *sw = (struct switch_stack *) regs - 1;
+@@ -1298,7 +1317,9 @@
+ mm_segment_t old_fs = get_fs();
+ unsigned long bundle[2];
+ unsigned long opcode;
++#ifndef XEN
+ struct siginfo si;
++#endif
+ const struct exception_table_entry *eh = NULL;
+ union {
+ unsigned long l;
+@@ -1317,6 +1338,9 @@
+ * user-level unaligned accesses. Otherwise, a clever program could trick this
+ * handler into reading an arbitrary kernel addresses...
+ */
++#ifdef XEN
++printk("ia64_handle_unaligned: called, not working yet\n");
++#else
+ if (!user_mode(regs))
+ eh = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
+ if (user_mode(regs) || eh) {
+@@ -1353,6 +1377,7 @@
+
+ if (__copy_from_user(bundle, (void *) regs->cr_iip, 16))
+ goto failure;
++#endif
+
+ /*
+ * extract the instruction from the bundle given the slot number
+@@ -1493,6 +1518,7 @@
+ /* NOT_REACHED */
+ }
+ force_sigbus:
++#ifndef XEN
+ si.si_signo = SIGBUS;
+ si.si_errno = 0;
+ si.si_code = BUS_ADRALN;
+@@ -1501,5 +1527,6 @@
+ si.si_isr = 0;
+ si.si_imm = 0;
+ force_sig_info(SIGBUS, &si, current);
++#endif
+ goto done;
+ }
--- /dev/null
+--- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/linux/wait.h 2004-06-15 23:19:31.000000000 -0600
++++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/linux/wait.h 2004-08-25 19:28:13.000000000 -0600
+@@ -104,10 +104,15 @@
+ list_del(&old->task_list);
+ }
+
++#ifdef XEN
++void FASTCALL(__wake_up(struct task_struct *p));
++#else
+ void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key));
++#endif
+ extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
+ extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
+
++#ifndef XEN
+ #define wake_up(x) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
+ #define wake_up_nr(x, nr) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
+ #define wake_up_all(x) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
+@@ -117,6 +122,7 @@
+ #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
+ #define wake_up_locked(x) __wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
+ #define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
++#endif
+
+ #define __wait_event(wq, condition) \
+ do { \
--- /dev/null
+
+/*
+ * pervasive debugger
+ * www.cl.cam.ac.uk/netos/pdb
+ *
+ * alex ho
+ * 2004
+ * university of cambridge computer laboratory
+ *
+ * code adapted originally from kgdb, nemesis, & gdbserver
+ */
+
+#include <xen/lib.h>
+#include <xen/sched.h>
+#include <asm/ptrace.h>
+#include <xen/keyhandler.h>
+//#include <asm/apic.h>
+#include <asm/domain_page.h> /* [un]map_domain_mem */
+#include <asm/processor.h>
+#include <asm/pdb.h>
+#include <xen/list.h>
+#include <xen/serial.h>
+
+#define __PDB_GET_VAL 1
+#define __PDB_SET_VAL 2
+
+/*
+ * Read or write memory in an address space
+ */
+int pdb_change_values(u_char *buffer, int length,
+ unsigned long cr3, unsigned long addr, int rw)
+{
+ dummy();
+ return 0;
+}
+
+/*
+ * Set memory in a domain's address space
+ * Set "length" bytes at "address" from "domain" to the values in "buffer".
+ * Return the number of bytes set, 0 if there was a problem.
+ */
+
+int pdb_set_values(u_char *buffer, int length,
+ unsigned long cr3, unsigned long addr)
+{
+ int count = pdb_change_values(buffer, length, cr3, addr, __PDB_SET_VAL);
+ return count;
+}
+
+/*
+ * Read memory from a domain's address space.
+ * Fetch "length" bytes at "address" from "domain" into "buffer".
+ * Return the number of bytes read, 0 if there was a problem.
+ */
+
+int pdb_get_values(u_char *buffer, int length,
+ unsigned long cr3, unsigned long addr)
+{
+ return pdb_change_values(buffer, length, cr3, addr, __PDB_GET_VAL);
+}
+
*/
#include <asm/privop.h>
-#include <asm/privify.h>
#include <asm/vcpu.h>
#include <asm/processor.h>
#include <asm/delay.h> // Debug only
**************************************************************************/
-void build_hypercall_bundle(UINT64 *imva, UINT64 breakimm, UINT64 hypnum, UINT64 ret)
+void build_hypercall_bundle(UINT64 *imva, UINT64 brkimm, UINT64 hypnum, UINT64 ret)
{
INST64_A5 slot0;
INST64_I19 slot1;
slot0.qp = 0; slot0.r1 = 2; slot0.r3 = 0; slot0.major = 0x9;
slot0.imm7b = hypnum; slot0.imm9d = hypnum >> 7;
slot0.imm5c = hypnum >> 16; slot0.s = 0;
- // slot1: break breakimm
+ // slot1: break brkimm
slot1.inst = 0;
slot1.qp = 0; slot1.x6 = 0; slot1.x3 = 0; slot1.major = 0x0;
- slot1.imm20 = breakimm; slot1.i = breakimm >> 20;
+ slot1.imm20 = brkimm; slot1.i = brkimm >> 20;
// if ret slot2: br.ret.sptk.many rp
// else slot2: br.cond.sptk.many rp
slot2.inst = 0; slot2.qp = 0; slot2.p = 1; slot2.b2 = 0;
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/desc.h>
-#include <asm/ldt.h>
+//#include <asm/ldt.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/regionreg.h>
#include <asm/hpsim_ssc.h>
#include <asm/dom_fw.h>
+extern unsigned long vcpu_get_itir_on_fault(struct exec_domain *, UINT64);
extern struct ia64_sal_retval pal_emulator_static(UINT64);
extern struct ia64_sal_retval sal_emulator(UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64);
IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
-#define PSCB(x) x->shared_info->arch
+#define PSCB(x,y) x->vcpu_info->arch.y
extern unsigned long vcpu_verbose;
return 0;
}
-void schedule_tail(struct domain *next)
+void schedule_tail(struct exec_domain *next)
{
unsigned long rr7;
- printk("current=%lx,shared_info=%lx\n",current,current->shared_info);
- printk("next=%lx,shared_info=%lx\n",next,next->shared_info);
+ printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info);
+ printk("next=%lx,shared_info=%lx\n",next,next->vcpu_info);
if (rr7 = load_region_regs(current)) {
printk("schedule_tail: change to rr7 not yet implemented\n");
}
}
-extern TR_ENTRY *match_tr(struct domain *d, unsigned long ifa);
+extern TR_ENTRY *match_tr(struct exec_domain *ed, unsigned long ifa);
void tdpfoo(void) { }
unsigned long translate_domain_pte(unsigned long pteval,
unsigned long address, unsigned long itir)
{
- struct domain *d = (struct domain *) current;
+ struct domain *d = current->domain;
unsigned long mask, pteval2, mpaddr;
unsigned long lookup_domain_mpa(struct domain *,unsigned long);
extern struct domain *dom0;
extern unsigned long lookup_domain_mpa(struct domain *,unsigned long);
unsigned long pteval;
- if (current == dom0) {
+ if (current->domain == dom0) {
if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
printk("translate_domain_mpaddr: out-of-bounds dom0 mpaddr %p! continuing...\n",mpaddr);
tdpfoo();
}
}
- pteval = lookup_domain_mpa(current,mpaddr);
+ pteval = lookup_domain_mpa(current->domain,mpaddr);
return ((pteval & _PAGE_PPN_MASK) | (mpaddr & ~PAGE_MASK));
}
void reflect_interruption(unsigned long ifa, unsigned long isr, unsigned long itiriim, struct pt_regs *regs, unsigned long vector)
{
- unsigned long vcpu_get_ipsr_int_state(struct domain *,unsigned long);
- unsigned long vcpu_get_rr_ve(struct domain *,unsigned long);
- unsigned long vcpu_get_itir_on_fault(struct domain *,unsigned long);
- struct domain *d = (struct domain *) current;
+ unsigned long vcpu_get_ipsr_int_state(struct exec_domain *,unsigned long);
+ unsigned long vcpu_get_rr_ve(struct exec_domain *,unsigned long);
+ struct domain *d = current->domain;
+ struct exec_domain *ed = current;
if (vector == IA64_EXTINT_VECTOR) {
first_extint = 0;
}
}
- if (!PSCB(d).interrupt_collection_enabled) {
- if (!(PSCB(d).ipsr & IA64_PSR_DT)) {
+ if (!PSCB(ed,interrupt_collection_enabled)) {
+ if (!(PSCB(ed,ipsr) & IA64_PSR_DT)) {
printf("psr.dt off, trying to deliver nested dtlb!\n");
while(1);
}
if (vector != IA64_DATA_TLB_VECTOR &&
vector != IA64_DATA_TLB_VECTOR) {
printf("psr.ic off, delivering fault=%lx,iip=%p,isr=%p,PSCB.iip=%p\n",
- vector,regs->cr_iip,isr,PSCB(d).iip);
+ vector,regs->cr_iip,isr,PSCB(ed,iip));
while(1);
}
//printf("Delivering NESTED DATA TLB fault\n");
vector = IA64_DATA_NESTED_TLB_VECTOR;
- regs->cr_iip = ((unsigned long) PSCB(d).iva + vector) & ~0xffUL;
+ regs->cr_iip = ((unsigned long) PSCB(ed,iva) + vector) & ~0xffUL;
regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
// NOTE: nested trap must NOT pass PSCB address
- //regs->r31 = (unsigned long) &PSCB(d);
+ //regs->r31 = (unsigned long) &PSCB(ed);
return;
}
- if ((vector & 0xf) != IA64_FORCED_IFA) PSCB(d).ifa = ifa;
- else ifa = PSCB(d).ifa;
+ if ((vector & 0xf) != IA64_FORCED_IFA) PSCB(ed,ifa) = ifa;
+ else ifa = PSCB(ed,ifa);
vector &= ~0xf;
// always deliver on ALT vector (for now?) because no VHPT
-// if (!vcpu_get_rr_ve(d,ifa)) {
+// if (!vcpu_get_rr_ve(ed,ifa)) {
if (vector == IA64_DATA_TLB_VECTOR)
vector = IA64_ALT_DATA_TLB_VECTOR;
else if (vector == IA64_INST_TLB_VECTOR)
vector = IA64_ALT_INST_TLB_VECTOR;
// }
- PSCB(d).unat = regs->ar_unat; // not sure if this is really needed?
- PSCB(d).precover_ifs = regs->cr_ifs;
- vcpu_bsw0(d);
- PSCB(d).ipsr = vcpu_get_ipsr_int_state(d,regs->cr_ipsr);
+ PSCB(ed,unat) = regs->ar_unat; // not sure if this is really needed?
+ PSCB(ed,precover_ifs) = regs->cr_ifs;
+ vcpu_bsw0(ed);
+ PSCB(ed,ipsr) = vcpu_get_ipsr_int_state(ed,regs->cr_ipsr);
if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
- PSCB(d).iim = itiriim;
- else PSCB(d).itir = vcpu_get_itir_on_fault(d,ifa);
- PSCB(d).isr = isr; // this is unnecessary except for interrupts!
- PSCB(d).iip = regs->cr_iip;
- PSCB(d).ifs = 0;
- PSCB(d).incomplete_regframe = 0;
-
- regs->cr_iip = ((unsigned long) PSCB(d).iva + vector) & ~0xffUL;
+ PSCB(ed,iim) = itiriim;
+ else PSCB(ed,itir) = vcpu_get_itir_on_fault(ed,ifa);
+ PSCB(ed,isr) = isr; // this is unnecessary except for interrupts!
+ PSCB(ed,iip) = regs->cr_iip;
+ PSCB(ed,ifs) = 0;
+ PSCB(ed,incomplete_regframe) = 0;
+
+ regs->cr_iip = ((unsigned long) PSCB(ed,iva) + vector) & ~0xffUL;
regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
// FIXME: NEED TO PASS PSCB, BUT **NOT** IN R31 WHICH IS BEING USED FOR ar.pr
// IN ANY CASE, PASS PINNED ADDRESS, NOT THIS ONE
- //regs->r31 = (unsigned long) &PSCB(d);
+ //regs->r31 = (unsigned long) &PSCB(ed);
- PSCB(d).interrupt_delivery_enabled = 0;
- PSCB(d).interrupt_collection_enabled = 0;
+ PSCB(ed,interrupt_delivery_enabled) = 0;
+ PSCB(ed,interrupt_collection_enabled) = 0;
}
void foodpi(void) {}
// NEVER successful if already reflecting a trap/fault because psr.i==0
void deliver_pending_interrupt(struct pt_regs *regs)
{
- struct domain *d = (struct domain *) current;
+ struct domain *d = current->domain;
+ struct exec_domain *ed = current;
// FIXME: Will this work properly if doing an RFI???
if (!is_idle_task(d) && user_mode(regs)) {
- vcpu_poke_timer(d);
- if (vcpu_deliverable_interrupts(d)) {
+ vcpu_poke_timer(ed);
+ if (vcpu_deliverable_interrupts(ed)) {
unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
foodpi();
reflect_interruption(0,isr,0,regs,IA64_EXTINT_VECTOR);
}
}
-int handle_lazy_cover(struct domain *d, unsigned long isr, struct pt_regs *regs)
+int handle_lazy_cover(struct exec_domain *ed, unsigned long isr, struct pt_regs *regs)
{
- if (!PSCB(d).interrupt_collection_enabled) {
+ if (!PSCB(ed,interrupt_collection_enabled)) {
if (isr & IA64_ISR_IR) {
// printf("Handling lazy cover\n");
- PSCB(d).ifs = regs->cr_ifs;
- PSCB(d).incomplete_regframe = 1;
+ PSCB(ed,ifs) = regs->cr_ifs;
+ PSCB(ed,incomplete_regframe) = 1;
regs->cr_ifs = 0;
return(1); // retry same instruction with cr.ifs off
}
void xen_handle_domain_access(unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
{
- struct domain *d = (struct domain *) current;
+ struct domain *d = (struct domain *) current->domain;
+ struct domain *ed = (struct exec_domain *) current;
TR_ENTRY *trp;
unsigned long psr = regs->cr_ipsr, mask, flags;
unsigned long iip = regs->cr_iip;
// got here trying to read a privop bundle
//if (d->metaphysical_mode) {
- if (d->metaphysical_mode && !(address>>61)) { //FIXME
+ if (PSCB(current,metaphysical_mode) && !(address>>61)) { //FIXME
if (d == dom0) {
if (address < dom0_start || address >= dom0_start + dom0_size) {
printk("xen_handle_domain_access: out-of-bounds"
pteval = lookup_domain_mpa(d,address);
//FIXME: check return value?
// would be nice to have a counter here
- vcpu_itc_no_srlz(d,2,address,pteval,PAGE_SHIFT);
+ vcpu_itc_no_srlz(ed,2,address,pteval,PAGE_SHIFT);
return;
}
if (address < 0x4000) printf("WARNING: page_fault @%p, iip=%p\n",address,iip);
while(1);
}
- fault = vcpu_tpa(d,address,&mpaddr);
+ fault = vcpu_tpa(ed,address,&mpaddr);
if (fault != IA64_NO_FAULT) {
// this is hardcoded to handle __get_domain_bundle only
regs->r8 = 0; regs->r9 = 0;
// would be nice to have a counter here
//printf("Handling privop data TLB miss\n");
// FIXME, must be inlined or potential for nested fault here!
- vcpu_itc_no_srlz(d,2,address,pteval,PAGE_SHIFT);
+ vcpu_itc_no_srlz(ed,2,address,pteval,PAGE_SHIFT);
}
void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
{
- struct domain *d = (struct domain *) current;
+ struct domain *d = (struct domain *) current->domain;
TR_ENTRY *trp;
unsigned long psr = regs->cr_ipsr, mask, flags;
unsigned long iip = regs->cr_iip;
// FIXME: no need to pass itir in to this routine as we need to
// compute the virtual itir anyway (based on domain's RR.ps)
// AND ACTUALLY reflect_interruption doesn't use it anyway!
- itir = vcpu_get_itir_on_fault(d,address);
+ itir = vcpu_get_itir_on_fault(current,address);
- if (d->metaphysical_mode && (is_data || !(address>>61))) { //FIXME
+ if (PSCB(current,metaphysical_mode) && (is_data || !(address>>61))) { //FIXME
// FIXME should validate mpaddr here
if (d == dom0) {
if (address < dom0_start || address >= dom0_start + dom0_size) {
printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, iip=%p! continuing...\n",address,iip);
- printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, old iip=%p!\n",address,d->shared_info->arch.iip);
+ printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, old iip=%p!\n",address,current->vcpu_info->arch.iip);
tdpfoo();
}
}
pteval = lookup_domain_mpa(d,address);
// FIXME, must be inlined or potential for nested fault here!
- vcpu_itc_no_srlz(d,is_data?2:1,address,pteval,PAGE_SHIFT);
+ vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,PAGE_SHIFT);
return;
}
- if (trp = match_tr(d,address)) {
+ if (trp = match_tr(current,address)) {
// FIXME address had better be pre-validated on insert
pteval = translate_domain_pte(trp->page_flags,address,trp->itir);
- vcpu_itc_no_srlz(d,is_data?2:1,address,pteval,(trp->itir>>2)&0x3f);
+ vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,(trp->itir>>2)&0x3f);
return;
}
vector = is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
- if (handle_lazy_cover(d, isr, regs)) return;
+ if (handle_lazy_cover(current, isr, regs)) return;
if (!(address>>61)) { printf("ia64_do_page_fault: @%p???, iip=%p, itc=%p (spinning...)\n",address,iip,ia64_get_itc()); while(1); }
if ((isr & IA64_ISR_SP)
|| ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
{
static int first_time = 1;
- struct domain *d = (struct domain *) current;
+ struct domain *d = (struct domain *) current->domain;
+ struct exec_domain *ed = (struct domain *) current;
extern unsigned long running_on_sim;
if (first_time) {
regs->r10 = x.v1; regs->r11 = x.v2;
break;
case FW_HYPERCALL_SAL_CALL:
- x = sal_emulator(vcpu_get_gr(d,32),vcpu_get_gr(d,33),
- vcpu_get_gr(d,34),vcpu_get_gr(d,35),
- vcpu_get_gr(d,36),vcpu_get_gr(d,37),
- vcpu_get_gr(d,38),vcpu_get_gr(d,39));
+ x = sal_emulator(vcpu_get_gr(ed,32),vcpu_get_gr(ed,33),
+ vcpu_get_gr(ed,34),vcpu_get_gr(ed,35),
+ vcpu_get_gr(ed,36),vcpu_get_gr(ed,37),
+ vcpu_get_gr(ed,38),vcpu_get_gr(ed,39));
regs->r8 = x.status; regs->r9 = x.v0;
regs->r10 = x.v1; regs->r11 = x.v2;
break;
case FW_HYPERCALL_EFI_RESET_SYSTEM:
printf("efi.reset_system called ");
- if (current == dom0) {
+ if (current->domain == dom0) {
printf("(by dom0)\n ");
(*efi.reset_system)(EFI_RESET_WARM,0,0,NULL);
}
{
unsigned long *tv, *tc;
fooefi();
- tv = vcpu_get_gr(d,32);
- tc = vcpu_get_gr(d,33);
+ tv = vcpu_get_gr(ed,32);
+ tc = vcpu_get_gr(ed,33);
//printf("efi_get_time(%p,%p) called...",tv,tc);
tv = __va(translate_domain_mpaddr(tv));
if (tc) tc = __va(translate_domain_mpaddr(tc));
ia64_handle_privop (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long itir)
{
IA64FAULT vector;
- struct domain *d = (struct domain *) current;
+ struct domain *d = current->domain;
+ struct exec_domain *ed = current;
// FIXME: no need to pass itir in to this routine as we need to
// compute the virtual itir anyway (based on domain's RR.ps)
// AND ACTUALLY reflect_interruption doesn't use it anyway!
- itir = vcpu_get_itir_on_fault(d,ifa);
- vector = priv_emulate((struct domain *)current,regs,isr);
+ itir = vcpu_get_itir_on_fault(ed,ifa);
+ vector = priv_emulate(current,regs,isr);
if (vector == IA64_RETRY) {
reflect_interruption(ifa,isr,itir,regs,
IA64_ALT_DATA_TLB_VECTOR | IA64_FORCED_IFA);
void
ia64_handle_reflection (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
{
- extern unsigned long vcpu_get_itir_on_fault(struct domain *vcpu, UINT64 ifa);
- struct domain *d = (struct domain *) current;
+ struct domain *d = (struct domain *) current->domain;
+ struct exec_domain *ed = (struct domain *) current;
unsigned long check_lazy_cover = 0;
unsigned long psr = regs->cr_ipsr;
- unsigned long itir = vcpu_get_itir_on_fault(d,ifa);
+ unsigned long itir = vcpu_get_itir_on_fault(ed,ifa);
if (!(psr & IA64_PSR_CPL)) {
printf("ia64_handle_reflection: reflecting with priv=0!!\n");
// FIXME: no need to pass itir in to this routine as we need to
// compute the virtual itir anyway (based on domain's RR.ps)
// AND ACTUALLY reflect_interruption doesn't use it anyway!
- itir = vcpu_get_itir_on_fault(d,ifa);
+ itir = vcpu_get_itir_on_fault(ed,ifa);
switch(vector) {
case 8:
vector = IA64_DIRTY_BIT_VECTOR; break;
vector = IA64_DISABLED_FPREG_VECTOR; break;
case 26:
printf("*** NaT fault... attempting to handle as privop\n");
- vector = priv_emulate(d,regs,isr);
+ vector = priv_emulate(ed,regs,isr);
if (vector == IA64_NO_FAULT) {
printf("*** Handled privop masquerading as NaT fault\n");
return;
while(vector);
return;
}
- if (check_lazy_cover && handle_lazy_cover(d, isr, regs)) return;
+ if (check_lazy_cover && handle_lazy_cover(ed, isr, regs)) return;
reflect_interruption(ifa,isr,itir,regs,vector);
}
// NOTE: DOES NOT SET VCPU's rrs[x] value!!
int set_one_rr(unsigned long rr, unsigned long val)
{
- struct domain *d = current;
+ struct exec_domain *ed = current;
unsigned long rreg = REGION_NUMBER(rr);
ia64_rr rrv, newrrv, memrrv;
unsigned long newrid;
rrv.rrval = val;
newrrv.rrval = 0;
- newrid = d->starting_rid + rrv.rid;
+ newrid = ed->domain->starting_rid + rrv.rid;
- if (newrid > d->ending_rid) return 0;
+ if (newrid > ed->domain->ending_rid) return 0;
memrrv.rrval = rrv.rrval;
if (rreg == 7) {
newrrv.rid = newrid;
newrrv.ve = VHPT_ENABLED_REGION_7;
newrrv.ps = IA64_GRANULE_SHIFT;
- ia64_new_rr7(vmMangleRID(newrrv.rrval));
+ ia64_new_rr7(vmMangleRID(newrrv.rrval),ed->vcpu_info);
}
else {
newrrv.rid = newrid;
return 1;
}
-void init_all_rr(struct domain *d)
+void init_all_rr(struct exec_domain *ed)
{
ia64_rr rrv;
rrv.rrval = 0;
- rrv.rid = d->metaphysical_rid;
+ rrv.rid = ed->domain->metaphysical_rid;
rrv.ps = PAGE_SHIFT;
rrv.ve = 1;
- d->shared_info->arch.rrs[0] = -1;
- d->shared_info->arch.rrs[1] = rrv.rrval;
- d->shared_info->arch.rrs[2] = rrv.rrval;
- d->shared_info->arch.rrs[3] = rrv.rrval;
- d->shared_info->arch.rrs[4] = rrv.rrval;
- d->shared_info->arch.rrs[5] = rrv.rrval;
- d->shared_info->arch.rrs[6] = rrv.rrval;
-// d->shared_info->arch.rrs[7] = rrv.rrval;
+if (!ed->vcpu_info) { printf("Stopping in init_all_rr\n"); dummy(); }
+ ed->vcpu_info->arch.rrs[0] = -1;
+ ed->vcpu_info->arch.rrs[1] = rrv.rrval;
+ ed->vcpu_info->arch.rrs[2] = rrv.rrval;
+ ed->vcpu_info->arch.rrs[3] = rrv.rrval;
+ ed->vcpu_info->arch.rrs[4] = rrv.rrval;
+ ed->vcpu_info->arch.rrs[5] = rrv.rrval;
+ ed->vcpu_info->arch.rrs[6] = rrv.rrval;
+// ed->shared_info->arch.rrs[7] = rrv.rrval;
}
/* XEN/ia64 INTERNAL ROUTINES */
-unsigned long physicalize_rid(struct domain *d, unsigned long rid)
+unsigned long physicalize_rid(struct exec_domain *ed, unsigned long rid)
{
ia64_rr rrv;
rrv.rrval = rid;
- rrv.rid += d->starting_rid;
+ rrv.rid += ed->domain->starting_rid;
return rrv.rrval;
}
unsigned long
-virtualize_rid(struct domain *d, unsigned long rid)
+virtualize_rid(struct exec_domain *ed, unsigned long rid)
{
ia64_rr rrv;
rrv.rrval = rid;
- rrv.rid -= d->starting_rid;
+ rrv.rid -= ed->domain->starting_rid;
return rrv.rrval;
}
// rr7 (because we have to to assembly and physical mode
// to change rr7). If no change to rr7 is required, returns 0.
//
-unsigned long load_region_regs(struct domain *d)
+unsigned long load_region_regs(struct exec_domain *ed)
{
unsigned long rr0, rr1,rr2, rr3, rr4, rr5, rr6;
unsigned long oldrr7, newrr7;
// TODO: These probably should be validated
- if (d->metaphysical_mode) {
+ if (ed->vcpu_info->arch.metaphysical_mode) {
ia64_rr rrv;
- rrv.rid = d->metaphysical_rid;
+ rrv.rid = ed->domain->metaphysical_rid;
rrv.ps = PAGE_SHIFT;
rrv.ve = 1;
rr0 = rr1 = rr2 = rr3 = rr4 = rr5 = rr6 = newrr7 = rrv.rrval;
}
else {
- rr0 = physicalize_rid(d, d->shared_info->arch.rrs[0]);
- rr1 = physicalize_rid(d, d->shared_info->arch.rrs[1]);
- rr2 = physicalize_rid(d, d->shared_info->arch.rrs[2]);
- rr3 = physicalize_rid(d, d->shared_info->arch.rrs[3]);
- rr4 = physicalize_rid(d, d->shared_info->arch.rrs[4]);
- rr5 = physicalize_rid(d, d->shared_info->arch.rrs[5]);
- rr6 = physicalize_rid(d, d->shared_info->arch.rrs[6]);
- newrr7 = physicalize_rid(d, d->shared_info->arch.rrs[7]);
+ rr0 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[0]);
+ rr1 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[1]);
+ rr2 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[2]);
+ rr3 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[3]);
+ rr4 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[4]);
+ rr5 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[5]);
+ rr6 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[6]);
+ newrr7 = physicalize_rid(ed, ed->vcpu_info->arch.rrs[7]);
}
set_rr_no_srlz(0x0000000000000000L, rr0);
--- /dev/null
+/*
+ * Intel SMP support routines.
+ *
+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
+ * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
+ *
+ * This code is released under the GNU General Public License version 2 or
+ * later.
+ */
+
+//#include <xen/irq.h>
+#include <xen/sched.h>
+#include <xen/delay.h>
+#include <xen/spinlock.h>
+#include <asm/smp.h>
+//#include <asm/mc146818rtc.h>
+#include <asm/pgalloc.h>
+//#include <asm/smpboot.h>
+#include <asm/hardirq.h>
+
+//#if CONFIG_SMP || IA64
+#if CONFIG_SMP
+//Huh? This seems to be used on ia64 even if !CONFIG_SMP
+void smp_send_event_check_mask(unsigned long cpu_mask)
+{
+ dummy();
+ //send_IPI_mask(cpu_mask, EVENT_CHECK_VECTOR);
+}
+
+//Huh? This seems to be used on ia64 even if !CONFIG_SMP
+void flush_tlb_mask(unsigned long mask)
+{
+ dummy();
+}
+
+//Huh? This seems to be used on ia64 even if !CONFIG_SMP
+int try_flush_tlb_mask(unsigned long mask)
+{
+ dummy();
+ return 1;
+}
+#endif
--- /dev/null
+// expand later
+int ht_per_core = 1;
--- /dev/null
+# Xen/ia64 heavily leverages/reuses many files from Linux/ia64
+# you need the following files from kernel.org
+# linux-2.6.7.tar.gz
+# linux-2.6.7-ia64-040619.diff.gz
+# place these in the parent directory of the xenXXX.bk tree
+# e.g. xen-unstable.bk should be in the same directory as linux-2.6.7.tar.gz
+
+# unpack linux-2.6.7 in the xenXXX.bk/.. directory and apply the patch
+gunzip linux-2.6.7-ia64-040619.diff.gz
+tar xzf linux-2.6.7.tar.gz
+cd linux-2.6.7
+# is there a way to gunzip as part of patch?
+patch -p1 <../linux-2.6.7-ia64-040619.diff
+
+# go back to the xen subdirectory of xen*.bk
+cd xen*.bk/xen
+
+# create and patch the linux/ia64 files
+bash arch/ia64/tools/mkbuildtree
+# this should print out many patch messages but no errors
+# run a second time (presumably to pick up files created by the first pass --
+# TODO confirm whether a single pass suffices)
+bash arch/ia64/tools/mkbuildtree
+
+# build xen/ia64
+# if using cross-compiler
+make TARGET_ARCH=ia64
+# else if native
+make
--- /dev/null
+#!/bin/sh
+#
+# run in xen-X.X/xen directory after unpacking linux-2.6.7 two directories
+# up from here (see LINUX= below)
+
+XEN=$PWD
+#LINUX=$XEN/linux-2.6.7
+LINUX=$XEN/../../linux-2.6.7
+LINUXPATCH=$XEN/arch/ia64/patch/linux-2.6.7
+XENPATCH=$XEN/arch/ia64/patch/xen-2.0.1
+
+cp_patch ()
+{
+ #diff -u $LINUX/$1 $XEN/$2 > $LINUXPATCH/$3
+ cp $LINUX/$1 $XEN/$2
+ patch <$LINUXPATCH/$3 $XEN/$2
+}
+
+xen_patch ()
+{
+ #patch <$XENPATCH/$2 $XEN/$1
+ echo 'skipping patch of' $XEN/$1
+}
+
+softlink ()
+{
+ ln -s $LINUX/$1 $XEN/$2
+}
+
+null ()
+{
+ touch $XEN/$1
+}
+
+
+# ensure linux directory is set up
+if [ ! -d $LINUX ]; then
+ echo "ERROR: $LINUX directory doesn't exist"
+ echo "(don't forget to apply the -ia64 patch to it too)"
+ exit
+fi
+
+# setup
+
+#mkdir arch/ia64
+#mkdir arch/ia64/lib
+#mkdir include/asm-ia64
+mkdir include/asm-generic
+mkdir include/asm-ia64/linux
+mkdir include/asm-ia64/linux/byteorder
+# use "gcc -Iinclude/asm-ia64" to find these linux includes
+ln -s $XEN/include/xen $XEN/include/linux
+ln -s $XEN/include/asm-ia64/linux $XEN/include/asm-ia64/xen
+
+# prepare for building asm-offsets (circular dependency)
+touch include/asm-ia64/asm-offsets.h
+sleep 2
+touch arch/ia64/asm-offsets.c
+
+# patches to xen/common files
+#xen_patch common/domain.c domain.c
+#xen_patch common/dom_mem_ops.c dom_mem_ops.c
+#xen_patch common/grant_table.c grant_table.c
+#xen_patch common/kernel.c kernel.c
+#xen_patch common/dom0_ops.c dom0_ops.c
+#xen_patch common/memory.c memory.c
+#xen_patch common/keyhandler.c keyhandler.c
+#xen_patch common/softirq.c softirq.c
+#xen_patch common/string.c string.c
+#xen_patch common/elf.c elf.c
+#xen_patch common/schedule.c schedule.c
+#xen_patch drivers/char/serial.c serial.c
+#xen_patch drivers/char/console.c console.c
+#xen_patch include/public/xen.h xen.h
+#xen_patch include/xen/grant_table.h grant_table.h
+#xen_patch include/xen/init.h init.h
+#xen_patch include/xen/irq.h irq.h
+#xen_patch include/xen/list.h list.h
+#xen_patch include/xen/sched.h sched.h
+#xen_patch include/xen/slab.h slab.h
+#xen_patch include/xen/time.h time.h
+
+
+# arch/ia64 files
+
+cp_patch arch/ia64/kernel/efi.c arch/ia64/efi.c efi.c
+cp_patch arch/ia64/kernel/entry.S arch/ia64/entry.S entry.S
+cp_patch arch/ia64/kernel/head.S arch/ia64/head.S head.S
+#cp_patch arch/ia64/kernel/init_task.c arch/ia64/init_task.c init_task.c
+cp_patch arch/ia64/kernel/irq_ia64.c arch/ia64/irq_ia64.c irq_ia64.c
+cp_patch arch/ia64/kernel/ivt.S arch/ia64/ivt.S ivt.S
+#cp_patch arch/ia64/kernel/minstate.h arch/ia64/minstate.h minstate.h
+cp_patch arch/ia64/kernel/setup.c arch/ia64/setup.c setup.c
+cp_patch arch/ia64/kernel/time.c arch/ia64/time.c time.c
+cp_patch arch/ia64/kernel/unaligned.c arch/ia64/unaligned.c unaligned.c
+cp_patch arch/ia64/kernel/vmlinux.lds.S arch/ia64/xen.lds.S lds.S
+
+cp_patch mm/bootmem.c arch/ia64/mm_bootmem.c mm_bootmem.c
+cp_patch mm/page_alloc.c arch/ia64/page_alloc.c page_alloc.c
+cp_patch mm/slab.c arch/ia64/slab.c slab.c
+
+cp_patch arch/ia64/mm/contig.c arch/ia64/mm_contig.c mm_contig.c
+cp_patch arch/ia64/mm/tlb.c arch/ia64/tlb.c tlb.c
+
+cp_patch arch/ia64/hp/sim/hpsim_irq.c arch/ia64/hpsim_irq.c hpsim_irq.c
+
+softlink arch/ia64/kernel/efi_stub.S arch/ia64/efi_stub.S
+softlink arch/ia64/kernel/entry.h arch/ia64/entry.h
+softlink arch/ia64/kernel/ia64_ksyms.c arch/ia64/ia64_ksyms.c
+softlink arch/ia64/kernel/irq_lsapic.c arch/ia64/irq_lsapic.c
+softlink arch/ia64/kernel/machvec.c arch/ia64/machvec.c
+softlink arch/ia64/kernel/pal.S arch/ia64/pal.S
+softlink arch/ia64/kernel/patch.c arch/ia64/patch.c
+softlink arch/ia64/kernel/sal.c arch/ia64/sal.c
+softlink arch/ia64/kernel/minstate.h arch/ia64/minstate.h
+
+softlink arch/ia64/lib/bitop.c arch/ia64/lib/bitop.c
+softlink arch/ia64/lib/carta_random.S arch/ia64/lib/carta_random.S
+softlink arch/ia64/lib/checksum.c arch/ia64/lib/checksum.c
+softlink arch/ia64/lib/clear_page.S arch/ia64/lib/clear_page.S
+softlink arch/ia64/lib/clear_user.S arch/ia64/lib/clear_user.S
+softlink arch/ia64/lib/copy_page_mck.S arch/ia64/lib/copy_page_mck.S
+softlink arch/ia64/lib/copy_page.S arch/ia64/lib/copy_page.S
+softlink arch/ia64/lib/copy_user.S arch/ia64/lib/copy_user.S
+softlink arch/ia64/lib/csum_partial_copy.c arch/ia64/lib/csum_partial_copy.c
+softlink arch/ia64/lib/dec_and_lock.c arch/ia64/lib/dec_and_lock.c
+softlink arch/ia64/lib/do_csum.S arch/ia64/lib/do_csum.S
+softlink arch/ia64/lib/flush.S arch/ia64/lib/flush.S
+softlink arch/ia64/lib/idiv32.S arch/ia64/lib/idiv32.S
+softlink arch/ia64/lib/idiv64.S arch/ia64/lib/idiv64.S
+softlink arch/ia64/lib/io.c arch/ia64/lib/io.c
+softlink arch/ia64/lib/ip_fast_csum.S arch/ia64/lib/ip_fast_csum.S
+softlink arch/ia64/lib/memcpy_mck.S arch/ia64/lib/memcpy_mck.S
+softlink arch/ia64/lib/memcpy.S arch/ia64/lib/memcpy.S
+softlink arch/ia64/lib/memset.S arch/ia64/lib/memset.S
+softlink arch/ia64/lib/strlen.S arch/ia64/lib/strlen.S
+softlink arch/ia64/lib/strlen_user.S arch/ia64/lib/strlen_user.S
+softlink arch/ia64/lib/strncpy_from_user.S arch/ia64/lib/strncpy_from_user.S
+softlink arch/ia64/lib/strnlen_user.S arch/ia64/lib/strnlen_user.S
+softlink arch/ia64/lib/swiotlb.c arch/ia64/lib/swiotlb.c
+softlink arch/ia64/lib/xor.S arch/ia64/lib/xor.S
+
+softlink lib/cmdline.c arch/ia64/cmdline.c
+
+softlink arch/ia64/hp/sim/hpsim.S arch/ia64/hpsim.S
+
+# xen/include/asm-generic files
+
+softlink include/asm-generic/cpumask_const_value.h include/asm-generic/cpumask_const_value.h cpumask_const_value.h
+softlink include/asm-generic/cpumask.h include/asm-generic/cpumask.h cpumask.h
+softlink include/asm-generic/cpumask_up.h include/asm-generic/cpumask_up.h cpumask_up.h
+softlink include/asm-generic/cpumask_arith.h include/asm-generic/cpumask_arith.h cpumask_arith.h
+softlink include/asm-generic/div64.h include/asm-generic/div64.h div64.h
+softlink include/asm-generic/ide_iops.h include/asm-generic/ide_iops.h ide_iops.h
+softlink include/asm-generic/pci-dma-compat.h include/asm-generic/pci-dma-compat.h pci-dma-compat.h
+softlink include/asm-generic/pci.h include/asm-generic/pci.h pci.h
+softlink include/asm-generic/pgtable.h include/asm-generic/pgtable.h pgtable.h
+softlink include/asm-generic/sections.h include/asm-generic/sections.h sections.h
+softlink include/asm-generic/topology.h include/asm-generic/topology.h topology.h
+softlink include/asm-generic/vmlinux.lds.h include/asm-generic/vmlinux.lds.h vmlinux.lds.h
+
+
+# xen/include/asm-ia64 files
+
+cp_patch arch/ia64/hp/sim/hpsim_ssc.h include/asm-ia64/hpsim_ssc.h hpsim_ssc.h
+
+cp_patch include/asm-ia64/current.h include/asm-ia64/current.h current.h
+cp_patch include/asm-ia64/gcc_intrin.h include/asm-ia64/gcc_intrin.h gcc_intrin.h
+cp_patch include/asm-ia64/hardirq.h include/asm-ia64/hardirq.h hardirq.h
+cp_patch include/asm-ia64/hw_irq.h include/asm-ia64/hw_irq.h hw_irq.h
+cp_patch include/asm-ia64/ide.h include/asm-ia64/ide.h ide.h
+cp_patch include/asm-ia64/io.h include/asm-ia64/io.h io.h
+cp_patch include/asm-ia64/irq.h include/asm-ia64/irq.h irq.h
+cp_patch include/asm-ia64/kregs.h include/asm-ia64/kregs.h kregs.h
+cp_patch include/asm-ia64/page.h include/asm-ia64/page.h page.h
+cp_patch include/asm-ia64/processor.h include/asm-ia64/processor.h processor.h
+cp_patch include/asm-ia64/sal.h include/asm-ia64/sal.h sal.h
+cp_patch include/asm-ia64/system.h include/asm-ia64/system.h system.h
+cp_patch include/asm-ia64/types.h include/asm-ia64/types.h types.h
+
+null include/asm-ia64/desc.h
+null include/asm-ia64/domain_page.h
+null include/asm-ia64/flushtlb.h
+null include/asm-ia64/io_apic.h
+null include/asm-ia64/pdb.h
+
+softlink include/asm-ia64/acpi.h include/asm-ia64/acpi.h
+softlink include/asm-ia64/asmmacro.h include/asm-ia64/asmmacro.h
+softlink include/asm-ia64/atomic.h include/asm-ia64/atomic.h
+softlink include/asm-ia64/bitops.h include/asm-ia64/bitops.h
+softlink include/asm-ia64/break.h include/asm-ia64/break.h
+softlink include/asm-ia64/bug.h include/asm-ia64/bug.h
+softlink include/asm-ia64/byteorder.h include/asm-ia64/byteorder.h
+softlink include/asm-ia64/cacheflush.h include/asm-ia64/cacheflush.h
+softlink include/asm-ia64/cache.h include/asm-ia64/cache.h
+softlink include/asm-ia64/checksum.h include/asm-ia64/checksum.h
+softlink include/asm-ia64/cpumask.h include/asm-ia64/cpumask.h
+softlink include/asm-ia64/delay.h include/asm-ia64/delay.h
+softlink include/asm-ia64/div64.h include/asm-ia64/div64.h
+softlink include/asm-ia64/dma.h include/asm-ia64/dma.h
+softlink include/asm-ia64/dma-mapping.h include/asm-ia64/dma-mapping.h
+softlink include/asm-ia64/fpu.h include/asm-ia64/fpu.h
+softlink include/asm-ia64/hdreg.h include/asm-ia64/hdreg.h
+softlink include/asm-ia64/ia32.h include/asm-ia64/ia32.h
+softlink include/asm-ia64/ia64regs.h include/asm-ia64/ia64regs.h
+softlink include/asm-ia64/intrinsics.h include/asm-ia64/intrinsics.h
+softlink include/asm-ia64/ioctl.h include/asm-ia64/ioctl.h
+softlink include/asm-ia64/linkage.h include/asm-ia64/linkage.h
+softlink include/asm-ia64/machvec.h include/asm-ia64/machvec.h
+softlink include/asm-ia64/machvec_hpsim.h include/asm-ia64/machvec_hpsim.h
+softlink include/asm-ia64/mca_asm.h include/asm-ia64/mca_asm.h
+softlink include/asm-ia64/mca.h include/asm-ia64/mca.h
+softlink include/asm-ia64/meminit.h include/asm-ia64/meminit.h
+softlink include/asm-ia64/mman.h include/asm-ia64/mman.h
+softlink include/asm-ia64/numa.h include/asm-ia64/numa.h
+softlink include/asm-ia64/pal.h include/asm-ia64/pal.h
+softlink include/asm-ia64/param.h include/asm-ia64/param.h
+softlink include/asm-ia64/patch.h include/asm-ia64/patch.h
+softlink include/asm-ia64/pci.h include/asm-ia64/pci.h
+softlink include/asm-ia64/percpu.h include/asm-ia64/percpu.h
+softlink include/asm-ia64/pgalloc.h include/asm-ia64/pgalloc.h
+softlink include/asm-ia64/pgtable.h include/asm-ia64/pgtable.h
+softlink include/asm-ia64/ptrace.h include/asm-ia64/ptrace.h
+softlink include/asm-ia64/ptrace_offsets.h include/asm-ia64/ptrace_offsets.h
+softlink include/asm-ia64/rse.h include/asm-ia64/rse.h
+softlink include/asm-ia64/rwsem.h include/asm-ia64/rwsem.h
+softlink include/asm-ia64/scatterlist.h include/asm-ia64/scatterlist.h
+softlink include/asm-ia64/sections.h include/asm-ia64/sections.h
+softlink include/asm-ia64/semaphore.h include/asm-ia64/semaphore.h
+softlink include/asm-ia64/serial.h include/asm-ia64/serial.h
+softlink include/asm-ia64/sigcontext.h include/asm-ia64/sigcontext.h
+softlink include/asm-ia64/signal.h include/asm-ia64/signal.h
+softlink include/asm-ia64/smp.h include/asm-ia64/smp.h
+softlink include/asm-ia64/spinlock.h include/asm-ia64/spinlock.h
+softlink include/asm-ia64/string.h include/asm-ia64/string.h
+softlink include/asm-ia64/thread_info.h include/asm-ia64/thread_info.h
+softlink include/asm-ia64/timex.h include/asm-ia64/timex.h
+softlink include/asm-ia64/topology.h include/asm-ia64/topology.h
+softlink include/asm-ia64/uaccess.h include/asm-ia64/uaccess.h
+softlink include/asm-ia64/unaligned.h include/asm-ia64/unaligned.h
+softlink include/asm-ia64/unistd.h include/asm-ia64/unistd.h
+softlink include/asm-ia64/unwind.h include/asm-ia64/unwind.h
+softlink include/asm-ia64/ustack.h include/asm-ia64/ustack.h
+
+# xen/include/asm-ia64/linux/*.h (== linux/include/linux/*.h)
+
+cp_patch include/linux/bootmem.h include/asm-ia64/linux/bootmem.h bootmem.h
+cp_patch include/linux/efi.h include/asm-ia64/linux/efi.h efi.h
+#cp_patch include/linux/init_task.h include/asm-ia64/linux/init_task.h init_task.h
+cp_patch include/linux/interrupt.h include/asm-ia64/linux/interrupt.h interrupt.h
+cp_patch include/linux/mmzone.h include/asm-ia64/linux/mmzone.h mmzone.h
+
+cp_patch include/linux/wait.h include/asm-ia64/linux/wait.h wait.h
+
+cp_patch include/linux/slab.h include/asm-ia64/slab.h slab.h
+
+# following renamed to avoid conflict
+cp_patch include/linux/time.h include/xen/linuxtime.h linuxtime.h
+
+softlink include/linux/bcd.h include/asm-ia64/linux/bcd.h
+softlink include/linux/bitmap.h include/asm-ia64/linux/bitmap.h
+softlink include/linux/bitops.h include/asm-ia64/linux/bitops.h
+softlink include/linux/cpumask.h include/asm-ia64/linux/cpumask.h
+softlink include/linux/dma-mapping.h include/asm-ia64/linux/dma-mapping.h
+softlink include/linux/gfp.h include/asm-ia64/linux/gfp.h
+softlink include/linux/initrd.h include/asm-ia64/linux/initrd.h
+softlink include/linux/kmalloc_sizes.h include/asm-ia64/linux/kmalloc_sizes.h
+softlink include/linux/linkage.h include/asm-ia64/linux/linkage.h
+softlink include/linux/numa.h include/asm-ia64/linux/numa.h
+softlink include/linux/page-flags.h include/asm-ia64/linux/page-flags.h
+softlink include/linux/percpu.h include/asm-ia64/linux/percpu.h
+softlink include/linux/preempt.h include/asm-ia64/linux/preempt.h
+softlink include/linux/rbtree.h include/asm-ia64/linux/rbtree.h
+softlink include/linux/rwsem.h include/asm-ia64/linux/rwsem.h
+softlink include/linux/seq_file.h include/asm-ia64/linux/seq_file.h
+softlink include/linux/serial_core.h include/asm-ia64/linux/serial_core.h
+softlink include/linux/stddef.h include/asm-ia64/linux/stddef.h
+softlink include/linux/thread_info.h include/asm-ia64/linux/thread_info.h
+softlink include/linux/threads.h include/asm-ia64/linux/threads.h
+softlink include/linux/timex.h include/asm-ia64/linux/timex.h
+softlink include/linux/topology.h include/asm-ia64/linux/topology.h
+softlink include/linux/tty.h include/asm-ia64/linux/tty.h
+
+null include/asm-ia64/linux/file.h
+null include/asm-ia64/linux/module.h
+null include/asm-ia64/linux/swap.h
+
+softlink include/linux/byteorder/generic.h include/asm-ia64/linux/byteorder/generic.h
+softlink include/linux/byteorder/little_endian.h include/asm-ia64/linux/byteorder/little_endian.h
+softlink include/linux/byteorder/swab.h include/asm-ia64/linux/byteorder/swab.h
+
*/
#include <linux/sched.h>
+#include <public/arch-ia64.h>
#include <asm/ia64_int.h>
#include <asm/vcpu.h>
#include <asm/regionreg.h>
#include <asm/delay.h>
typedef union {
- struct ia64_psr;
+ struct ia64_psr ia64_psr;
unsigned long i64;
} PSR;
//typedef struct domain VCPU;
// this def for vcpu_regs won't work if kernel stack is present
-#define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->regs)
-#define PSCB(x) x->shared_info->arch
+#define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs)
+#define PSCB(x,y) x->vcpu_info->arch.y
#define TRUE 1
#define FALSE 0
if (reg == 44) return (vcpu_set_itc(vcpu,val));
if (reg == 27) return (IA64_ILLOP_FAULT);
if (reg > 7) return (IA64_ILLOP_FAULT);
- PSCB(vcpu).krs[reg] = val;
+ PSCB(vcpu,krs[reg]) = val;
#if 0
// for now, privify kr read's so all kr accesses are privileged
switch (reg) {
IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val)
{
if (reg > 7) return (IA64_ILLOP_FAULT);
- *val = PSCB(vcpu).krs[reg];
+ *val = PSCB(vcpu,krs[reg]);
return IA64_NO_FAULT;
}
void vcpu_set_metaphysical_mode(VCPU *vcpu, BOOLEAN newmode)
{
/* only do something if mode changes */
- if (!!newmode ^ !!vcpu->metaphysical_mode) {
- if (newmode) set_metaphysical_rr(0,vcpu->metaphysical_rid);
- else if (PSCB(vcpu).rrs[0] != -1)
- set_one_rr(0, PSCB(vcpu).rrs[0]);
- vcpu->metaphysical_mode = newmode;
+ if (!!newmode ^ !!PSCB(vcpu,metaphysical_mode)) {
+ if (newmode) set_metaphysical_rr(0,vcpu->domain->metaphysical_rid);
+ else if (PSCB(vcpu,rrs[0]) != -1)
+ set_one_rr(0, PSCB(vcpu,rrs[0]));
+ PSCB(vcpu,metaphysical_mode) = newmode;
}
}
ipsr = (struct ia64_psr *)&regs->cr_ipsr;
imm = *(struct ia64_psr *)&imm24;
// interrupt flag
- if (imm.i) PSCB(vcpu).interrupt_delivery_enabled = 0;
- if (imm.ic) PSCB(vcpu).interrupt_collection_enabled = 0;
+ if (imm.i) PSCB(vcpu,interrupt_delivery_enabled) = 0;
+ if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 0;
// interrupt collection flag
- //if (imm.ic) PSCB(vcpu).interrupt_delivery_enabled = 0;
+ //if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
// just handle psr.up and psr.pp for now
if (imm24 & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
| IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT
if (imm.pp) { ipsr->pp = 1; psr.pp = 1; }
if (imm.sp) { ipsr->sp = 1; psr.sp = 1; }
if (imm.i) {
- if (!PSCB(vcpu).interrupt_delivery_enabled) {
+ if (!PSCB(vcpu,interrupt_delivery_enabled)) {
//printf("vcpu_set_psr_sm: psr.ic 0->1 ");
enabling_interrupts = 1;
}
- PSCB(vcpu).interrupt_delivery_enabled = 1;
+ PSCB(vcpu,interrupt_delivery_enabled) = 1;
}
- if (imm.ic) PSCB(vcpu).interrupt_collection_enabled = 1;
+ if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
// TODO: do this faster
if (imm.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
if (imm.ac) { ipsr->ac = 1; psr.ac = 1; }
if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
if (newpsr.sp) { ipsr->sp = 1; psr.sp = 1; }
if (newpsr.i) {
- if (!PSCB(vcpu).interrupt_delivery_enabled)
+ if (!PSCB(vcpu,interrupt_delivery_enabled))
enabling_interrupts = 1;
- PSCB(vcpu).interrupt_delivery_enabled = 1;
+ PSCB(vcpu,interrupt_delivery_enabled) = 1;
}
- if (newpsr.ic) PSCB(vcpu).interrupt_collection_enabled = 1;
+ if (newpsr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
if (newpsr.ac) { ipsr->ac = 1; psr.ac = 1; }
if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
newpsr = *(struct ia64_psr *)&psr;
if (newpsr.cpl == 2) newpsr.cpl = 0;
- if (PSCB(vcpu).interrupt_delivery_enabled) newpsr.i = 1;
+ if (PSCB(vcpu,interrupt_delivery_enabled)) newpsr.i = 1;
else newpsr.i = 0;
- if (PSCB(vcpu).interrupt_collection_enabled) newpsr.ic = 1;
+ if (PSCB(vcpu,interrupt_collection_enabled)) newpsr.ic = 1;
else newpsr.ic = 0;
*pval = *(unsigned long *)&newpsr;
return IA64_NO_FAULT;
BOOLEAN vcpu_get_psr_ic(VCPU *vcpu)
{
- return !!PSCB(vcpu).interrupt_collection_enabled;
+ return !!PSCB(vcpu,interrupt_collection_enabled);
}
BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
{
- return !!PSCB(vcpu).interrupt_delivery_enabled;
+ return !!PSCB(vcpu,interrupt_delivery_enabled);
}
UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
{
- UINT64 dcr = PSCB(vcpu).dcr;
+ UINT64 dcr = PSCB(vcpu,dcr);
PSR psr = {0};
//printf("*** vcpu_get_ipsr_int_state (0x%016lx)...",prevpsr);
psr.i64 = prevpsr;
- psr.be = 0; if (dcr & IA64_DCR_BE) psr.be = 1;
- psr.pp = 0; if (dcr & IA64_DCR_PP) psr.pp = 1;
- psr.ic = PSCB(vcpu).interrupt_collection_enabled;
- psr.i = PSCB(vcpu).interrupt_delivery_enabled;
- psr.bn = PSCB(vcpu).banknum;
- psr.dt = 1; psr.it = 1; psr.rt = 1;
- if (psr.cpl == 2) psr.cpl = 0; // !!!! fool domain
+ psr.ia64_psr.be = 0; if (dcr & IA64_DCR_BE) psr.ia64_psr.be = 1;
+ psr.ia64_psr.pp = 0; if (dcr & IA64_DCR_PP) psr.ia64_psr.pp = 1;
+ psr.ia64_psr.ic = PSCB(vcpu,interrupt_collection_enabled);
+ psr.ia64_psr.i = PSCB(vcpu,interrupt_delivery_enabled);
+ psr.ia64_psr.bn = PSCB(vcpu,banknum);
+ psr.ia64_psr.dt = 1; psr.ia64_psr.it = 1; psr.ia64_psr.rt = 1;
+ if (psr.ia64_psr.cpl == 2) psr.ia64_psr.cpl = 0; // !!!! fool domain
// psr.pk = 1;
//printf("returns 0x%016lx...",psr.i64);
return psr.i64;
{
extern unsigned long privop_trace;
//privop_trace=0;
-//verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu).iip);
+//verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu,iip));
// Reads of cr.dcr on Xen always have the sign bit set, so
// a domain can differentiate whether it is running on SP or not
- *pval = PSCB(vcpu).dcr | 0x8000000000000000L;
+ *pval = PSCB(vcpu,dcr) | 0x8000000000000000L;
return (IA64_NO_FAULT);
}
IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
{
- *pval = PSCB(vcpu).iva & ~0x7fffL;
+ *pval = PSCB(vcpu,iva) & ~0x7fffL;
return (IA64_NO_FAULT);
}
IA64FAULT vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
{
- *pval = PSCB(vcpu).pta;
+ *pval = PSCB(vcpu,pta);
return (IA64_NO_FAULT);
}
{
//REGS *regs = vcpu_regs(vcpu);
//*pval = regs->cr_ipsr;
- *pval = PSCB(vcpu).ipsr;
+ *pval = PSCB(vcpu,ipsr);
return (IA64_NO_FAULT);
}
IA64FAULT vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
{
- *pval = PSCB(vcpu).isr;
+ *pval = PSCB(vcpu,isr);
return (IA64_NO_FAULT);
}
{
//REGS *regs = vcpu_regs(vcpu);
//*pval = regs->cr_iip;
- *pval = PSCB(vcpu).iip;
+ *pval = PSCB(vcpu,iip);
return (IA64_NO_FAULT);
}
IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
{
- UINT64 val = PSCB(vcpu).ifa;
+ UINT64 val = PSCB(vcpu,ifa);
*pval = val;
return (IA64_NO_FAULT);
}
IA64FAULT vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
{
- UINT64 val = PSCB(vcpu).itir;
+ UINT64 val = PSCB(vcpu,itir);
*pval = val;
return (IA64_NO_FAULT);
}
IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
{
- UINT64 val = PSCB(vcpu).iipa;
+ UINT64 val = PSCB(vcpu,iipa);
// SP entry code does not save iipa yet nor does it get
// properly delivered in the pscb
printf("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
IA64FAULT vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
{
- //PSCB(vcpu).ifs = PSCB(vcpu)->regs.cr_ifs;
- //*pval = PSCB(vcpu).regs.cr_ifs;
- *pval = PSCB(vcpu).ifs;
- PSCB(vcpu).incomplete_regframe = 0;
+ //PSCB(vcpu,ifs) = PSCB(vcpu)->regs.cr_ifs;
+ //*pval = PSCB(vcpu,regs).cr_ifs;
+ *pval = PSCB(vcpu,ifs);
+ PSCB(vcpu,incomplete_regframe) = 0;
return (IA64_NO_FAULT);
}
IA64FAULT vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
{
- UINT64 val = PSCB(vcpu).iim;
+ UINT64 val = PSCB(vcpu,iim);
*pval = val;
return (IA64_NO_FAULT);
}
IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
{
- return vcpu_thash(vcpu,PSCB(vcpu).ifa,pval);
+ return vcpu_thash(vcpu,PSCB(vcpu,ifa),pval);
}
IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val)
// a domain can differentiate whether it is running on SP or not
// Thus, writes of DCR should ignore the sign bit
//verbose("vcpu_set_dcr: called\n");
- PSCB(vcpu).dcr = val & ~0x8000000000000000L;
+ PSCB(vcpu,dcr) = val & ~0x8000000000000000L;
return (IA64_NO_FAULT);
}
IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val)
{
- PSCB(vcpu).iva = val & ~0x7fffL;
+ PSCB(vcpu,iva) = val & ~0x7fffL;
return (IA64_NO_FAULT);
}
}
if (val & (0x3f<<9)) /* reserved fields */ return IA64_RSVDREG_FAULT;
if (val & 2) /* reserved fields */ return IA64_RSVDREG_FAULT;
- PSCB(vcpu).pta = val;
+ PSCB(vcpu,pta) = val;
return IA64_NO_FAULT;
}
IA64FAULT vcpu_set_ipsr(VCPU *vcpu, UINT64 val)
{
- PSCB(vcpu).ipsr = val;
+ PSCB(vcpu,ipsr) = val;
return IA64_NO_FAULT;
}
IA64FAULT vcpu_set_isr(VCPU *vcpu, UINT64 val)
{
- PSCB(vcpu).isr = val;
+ PSCB(vcpu,isr) = val;
return IA64_NO_FAULT;
}
IA64FAULT vcpu_set_iip(VCPU *vcpu, UINT64 val)
{
- PSCB(vcpu).iip = val;
+ PSCB(vcpu,iip) = val;
return IA64_NO_FAULT;
}
IA64FAULT vcpu_set_ifa(VCPU *vcpu, UINT64 val)
{
- PSCB(vcpu).ifa = val;
+ PSCB(vcpu,ifa) = val;
return IA64_NO_FAULT;
}
IA64FAULT vcpu_set_itir(VCPU *vcpu, UINT64 val)
{
- PSCB(vcpu).itir = val;
+ PSCB(vcpu,itir) = val;
return IA64_NO_FAULT;
}
// SP entry code does not save iipa yet nor does it get
// properly delivered in the pscb
printf("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
- PSCB(vcpu).iipa = val;
+ PSCB(vcpu,iipa) = val;
return IA64_NO_FAULT;
}
IA64FAULT vcpu_set_ifs(VCPU *vcpu, UINT64 val)
{
//REGS *regs = vcpu_regs(vcpu);
- PSCB(vcpu).ifs = val;
+ PSCB(vcpu,ifs) = val;
return IA64_NO_FAULT;
}
IA64FAULT vcpu_set_iim(VCPU *vcpu, UINT64 val)
{
- PSCB(vcpu).iim = val;
+ PSCB(vcpu,iim) = val;
return IA64_NO_FAULT;
}
IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT64 val)
{
- PSCB(vcpu).iha = val;
+ PSCB(vcpu,iha) = val;
return IA64_NO_FAULT;
}
printf("vcpu_pend_interrupt: bad vector\n");
return;
}
- if (!test_bit(vector,PSCB(vcpu).delivery_mask)) return;
- if (test_bit(vector,PSCB(vcpu).irr)) {
+ if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return;
+ if (test_bit(vector,PSCB(vcpu,irr))) {
//printf("vcpu_pend_interrupt: overrun\n");
}
- set_bit(vector,PSCB(vcpu).irr);
+ set_bit(vector,PSCB(vcpu,irr));
}
#define IA64_TPR_MMI 0x10000
{
UINT64 *p, *q, *r, bits, bitnum, mask, i, vector;
- p = &PSCB(vcpu).irr[3];
- q = &PSCB(vcpu).delivery_mask[3];
- r = &PSCB(vcpu).insvc[3];
+ p = &PSCB(vcpu,irr[3]);
+ q = &PSCB(vcpu,delivery_mask[3]);
+ r = &PSCB(vcpu,insvc[3]);
for (i = 3; ; p--, q--, r--, i--) {
bits = *p & *q;
if (bits) break; // got a potential interrupt
//printf("but masked by equal inservice\n");
return SPURIOUS_VECTOR;
}
- if (PSCB(vcpu).tpr & IA64_TPR_MMI) {
+ if (PSCB(vcpu,tpr) & IA64_TPR_MMI) {
// tpr.mmi is set
//printf("but masked by tpr.mmi\n");
return SPURIOUS_VECTOR;
}
- if (((PSCB(vcpu).tpr & IA64_TPR_MIC) + 15) >= vector) {
+ if (((PSCB(vcpu,tpr) & IA64_TPR_MIC) + 15) >= vector) {
//tpr.mic masks class
//printf("but masked by tpr.mic\n");
return SPURIOUS_VECTOR;
vector = vcpu_check_pending_interrupts(vcpu);
if (vector == SPURIOUS_VECTOR) {
- PSCB(vcpu).pending_interruption = 0;
+ PSCB(vcpu,pending_interruption) = 0;
*pval = vector;
return IA64_NO_FAULT;
}
i = vector >> 6;
mask = 1L << (vector & 0x3f);
//printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %ld\n",vector);
- PSCB(vcpu).insvc[i] |= mask;
- PSCB(vcpu).irr[i] &= ~mask;
- PSCB(vcpu).pending_interruption--;
+ PSCB(vcpu,insvc[i]) |= mask;
+ PSCB(vcpu,irr[i]) &= ~mask;
+ PSCB(vcpu,pending_interruption)--;
*pval = vector;
return IA64_NO_FAULT;
}
IA64FAULT vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
{
- *pval = PSCB(vcpu).tpr;
+ *pval = PSCB(vcpu,tpr);
return (IA64_NO_FAULT);
}
IA64FAULT vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
{
- *pval = PSCB(vcpu).itv;
+ *pval = PSCB(vcpu,itv);
return (IA64_NO_FAULT);
}
IA64FAULT vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
{
- *pval = PSCB(vcpu).pmv;
+ *pval = PSCB(vcpu,pmv);
return (IA64_NO_FAULT);
}
IA64FAULT vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
{
- *pval = PSCB(vcpu).cmcv;
+ *pval = PSCB(vcpu,cmcv);
return (IA64_NO_FAULT);
}
IA64FAULT vcpu_set_tpr(VCPU *vcpu, UINT64 val)
{
if (val & 0xff00) return IA64_RSVDREG_FAULT;
- PSCB(vcpu).tpr = val;
+ PSCB(vcpu,tpr) = val;
return (IA64_NO_FAULT);
}
UINT64 *p, bits, vec, bitnum;
int i;
- p = &PSCB(vcpu).insvc[3];
+ p = &PSCB(vcpu,insvc[3]);
for (i = 3; (i >= 0) && !(bits = *p); i--, p--);
if (i < 0) {
printf("Trying to EOI interrupt when none are in-service.\r\n");
bits &= ~(1L << bitnum);
*p = bits;
/* clearing an eoi bit may unmask another pending interrupt... */
- if (PSCB(vcpu).interrupt_delivery_enabled) { // but only if enabled...
+ if (PSCB(vcpu,interrupt_delivery_enabled)) { // but only if enabled...
// worry about this later... Linux only calls eoi
// with interrupts disabled
printf("Trying to EOI interrupt with interrupts enabled\r\n");
extern unsigned long privop_trace;
//privop_trace=1;
if (val & 0xef00) return (IA64_ILLOP_FAULT);
- PSCB(vcpu).itv = val;
+ PSCB(vcpu,itv) = val;
if (val & 0x10000) {
-printf("**** vcpu_set_itv(%d): vitm=%lx, setting to 0\n",val,PSCB(vcpu).domain_itm);
- PSCB(vcpu).domain_itm = 0;
+printf("**** vcpu_set_itv(%d): vitm=%lx, setting to 0\n",val,PSCB(vcpu,domain_itm));
+ PSCB(vcpu,domain_itm) = 0;
}
else vcpu_enable_timer(vcpu,1000000L);
return (IA64_NO_FAULT);
IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val)
{
if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
- PSCB(vcpu).pmv = val;
+ PSCB(vcpu,pmv) = val;
return (IA64_NO_FAULT);
}
IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val)
{
if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
- PSCB(vcpu).cmcv = val;
+ PSCB(vcpu,cmcv) = val;
return (IA64_NO_FAULT);
}
BOOLEAN vcpu_timer_disabled(VCPU *vcpu)
{
- UINT64 itv = PSCB(vcpu).itv;
+ UINT64 itv = PSCB(vcpu,itv);
return(!itv || !!(itv & 0x10000));
}
BOOLEAN vcpu_timer_expired(VCPU *vcpu)
{
- unsigned long domain_itm = PSCB(vcpu).domain_itm;
+ unsigned long domain_itm = PSCB(vcpu,domain_itm);
unsigned long now = ia64_get_itc();
if (domain_itm && (now > domain_itm) &&
void vcpu_set_next_timer(VCPU *vcpu)
{
- UINT64 d = PSCB(vcpu).domain_itm;
- //UINT64 s = PSCB(vcpu).xen_itm;
+ UINT64 d = PSCB(vcpu,domain_itm);
+ //UINT64 s = PSCB(vcpu,xen_itm);
UINT64 s = local_cpu_data->itm_next;
UINT64 now = ia64_get_itc();
- //UINT64 interval = PSCB(vcpu).xen_timer_interval;
+ //UINT64 interval = PSCB(vcpu,xen_timer_interval);
/* gloss over the wraparound problem for now... we know it exists
* but it doesn't matter right now */
#if 0
/* ensure at least next SP tick is in the future */
- if (!interval) PSCB(vcpu).xen_itm = now +
+ if (!interval) PSCB(vcpu,xen_itm) = now +
#if 0
(running_on_sim() ? SIM_DEFAULT_CLOCK_RATE :
DEFAULT_CLOCK_RATE);
//printf("vcpu_set_next_timer: HACK!\n");
#endif
#if 0
- if (PSCB(vcpu).xen_itm < now)
- while (PSCB(vcpu).xen_itm < now + (interval>>1))
- PSCB(vcpu).xen_itm += interval;
+ if (PSCB(vcpu,xen_itm) < now)
+ while (PSCB(vcpu,xen_itm) < now + (interval>>1))
+ PSCB(vcpu,xen_itm) += interval;
#endif
#endif
- if (is_idle_task(vcpu)) {
+ if (is_idle_task(vcpu->domain)) {
printf("****** vcpu_set_next_timer called during idle!!\n");
}
- //s = PSCB(vcpu).xen_itm;
+ //s = PSCB(vcpu,xen_itm);
if (d && (d > now) && (d < s)) {
vcpu_safe_set_itm(d);
//using_domain_as_itm++;
// parameter is a time interval specified in cycles
void vcpu_enable_timer(VCPU *vcpu,UINT64 cycles)
{
- PSCB(vcpu).xen_timer_interval = cycles;
+ PSCB(vcpu,xen_timer_interval) = cycles;
vcpu_set_next_timer(vcpu);
printf("vcpu_enable_timer(%d): interval set to %d cycles\n",
- PSCB(vcpu).xen_timer_interval);
- __set_bit(PSCB(vcpu).itv, PSCB(vcpu).delivery_mask);
+ PSCB(vcpu,xen_timer_interval));
+ __set_bit(PSCB(vcpu,itv), PSCB(vcpu,delivery_mask));
}
IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val)
//if (val < now) val = now + 1000;
//printf("*** vcpu_set_itm: called with %lx\n",val);
- PSCB(vcpu).domain_itm = val;
+ PSCB(vcpu,domain_itm) = val;
vcpu_set_next_timer(vcpu);
return (IA64_NO_FAULT);
}
{
UINT64 oldnow = ia64_get_itc();
- UINT64 olditm = PSCB(vcpu).domain_itm;
+ UINT64 olditm = PSCB(vcpu,domain_itm);
unsigned long d = olditm - oldnow;
unsigned long x = local_cpu_data->itm_next - oldnow;
local_irq_disable();
if (olditm) {
printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n",val,newnow+d);
- PSCB(vcpu).domain_itm = newnow + d;
+ PSCB(vcpu,domain_itm) = newnow + d;
}
local_cpu_data->itm_next = newnow + x;
- d = PSCB(vcpu).domain_itm;
+ d = PSCB(vcpu,domain_itm);
x = local_cpu_data->itm_next;
ia64_set_itc(newnow);
void vcpu_pend_timer(VCPU *vcpu)
{
- UINT64 itv = PSCB(vcpu).itv & 0xff;
+ UINT64 itv = PSCB(vcpu,itv) & 0xff;
if (vcpu_timer_disabled(vcpu)) return;
vcpu_pend_interrupt(vcpu, itv);
//FIXME: This is a hack because everything dies if a timer tick is lost
void vcpu_poke_timer(VCPU *vcpu)
{
- UINT64 itv = PSCB(vcpu).itv & 0xff;
+ UINT64 itv = PSCB(vcpu,itv) & 0xff;
UINT64 now = ia64_get_itc();
- UINT64 itm = PSCB(vcpu).domain_itm;
+ UINT64 itm = PSCB(vcpu,domain_itm);
UINT64 irr;
if (vcpu_timer_disabled(vcpu)) return;
while(1);
}
// using 0xef instead of itv so can get real irr
- if (now > itm && !test_bit(0xefL, PSCB(vcpu).insvc)) {
- if (!test_bit(0xefL,PSCB(vcpu).irr)) {
+ if (now > itm && !test_bit(0xefL, PSCB(vcpu,insvc))) {
+ if (!test_bit(0xefL,PSCB(vcpu,irr))) {
irr = ia64_getreg(_IA64_REG_CR_IRR3);
if (irr & (1L<<(0xef-0xc0))) return;
if (now-itm>0x800000)
IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa)
{
- PSCB(vcpu).ifa = ifa; // privop traps don't set ifa so do it here
+ PSCB(vcpu,ifa) = ifa; // privop traps don't set ifa so do it here
return (IA64_DATA_TLB_VECTOR | IA64_FORCED_IFA);
}
REGS *regs = vcpu_regs(vcpu);
extern void dorfirfi(void);
- psr.i64 = PSCB(vcpu).ipsr;
- if (psr.cpl < 3) psr.cpl = 2;
- if (psr.i) PSCB(vcpu).interrupt_delivery_enabled = 1;
- int_enable = psr.i;
- if (psr.ic) PSCB(vcpu).interrupt_collection_enabled = 1;
- if (psr.dt && psr.rt && psr.it) vcpu_set_metaphysical_mode(vcpu,FALSE);
+ psr.i64 = PSCB(vcpu,ipsr);
+ if (psr.ia64_psr.cpl < 3) psr.ia64_psr.cpl = 2;
+ if (psr.ia64_psr.i) PSCB(vcpu,interrupt_delivery_enabled) = 1;
+ int_enable = psr.ia64_psr.i;
+ if (psr.ia64_psr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
+ if (psr.ia64_psr.dt && psr.ia64_psr.rt && psr.ia64_psr.it) vcpu_set_metaphysical_mode(vcpu,FALSE);
else vcpu_set_metaphysical_mode(vcpu,TRUE);
- psr.ic = 1; psr.i = 1;
- psr.dt = 1; psr.rt = 1; psr.it = 1;
- psr.bn = 1;
+ psr.ia64_psr.ic = 1; psr.ia64_psr.i = 1;
+ psr.ia64_psr.dt = 1; psr.ia64_psr.rt = 1; psr.ia64_psr.it = 1;
+ psr.ia64_psr.bn = 1;
//psr.pk = 1; // checking pkeys shouldn't be a problem but seems broken
- if (psr.be) {
+ if (psr.ia64_psr.be) {
printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
return (IA64_ILLOP_FAULT);
}
- PSCB(vcpu).incomplete_regframe = 0; // is this necessary?
- ifs = PSCB(vcpu).ifs;
+ PSCB(vcpu,incomplete_regframe) = 0; // is this necessary?
+ ifs = PSCB(vcpu,ifs);
//if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
//if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
if (ifs & regs->cr_ifs & 0x8000000000000000L) {
-#define SI_OFS(x) ((char *)(&PSCB(vcpu).x) - (char *)(vcpu->shared_info))
-if (SI_OFS(iip)!=0x150 || SI_OFS(ipsr)!=0x148 || SI_OFS(ifs)!=0x158) {
+#define SI_OFS(x) ((char *)(&PSCB(vcpu,x)) - (char *)(vcpu->vcpu_info))
+if (SI_OFS(iip)!=0x10 || SI_OFS(ipsr)!=0x08 || SI_OFS(ifs)!=0x18) {
printf("SI_CR_IIP/IPSR/IFS_OFFSET CHANGED, SEE dorfirfi\n");
+printf("SI_CR_IIP=0x%x,IPSR=0x%x,IFS_OFFSET=0x%x\n",SI_OFS(iip),SI_OFS(ipsr),SI_OFS(ifs));
while(1);
}
- // TODO: validate PSCB(vcpu).iip
- // TODO: PSCB(vcpu).ipsr = psr;
- PSCB(vcpu).ipsr = psr.i64;
+ // TODO: validate PSCB(vcpu,iip)
+ // TODO: PSCB(vcpu,ipsr) = psr;
+ PSCB(vcpu,ipsr) = psr.i64;
// now set up the trampoline
regs->cr_iip = *(unsigned long *)dorfirfi; // function pointer!!
__asm__ __volatile ("mov %0=psr;;":"=r"(regspsr)::"memory");
}
else {
regs->cr_ipsr = psr.i64;
- regs->cr_iip = PSCB(vcpu).iip;
+ regs->cr_iip = PSCB(vcpu,iip);
}
- PSCB(vcpu).interrupt_collection_enabled = 1;
+ PSCB(vcpu,interrupt_collection_enabled) = 1;
vcpu_bsw1(vcpu);
- PSCB(vcpu).interrupt_delivery_enabled = int_enable;
+ PSCB(vcpu,interrupt_delivery_enabled) = int_enable;
return (IA64_NO_FAULT);
}
{
REGS *regs = vcpu_regs(vcpu);
- if (!PSCB(vcpu).interrupt_collection_enabled) {
- if (!PSCB(vcpu).incomplete_regframe)
- PSCB(vcpu).ifs = regs->cr_ifs;
- else PSCB(vcpu).incomplete_regframe = 0;
+ if (!PSCB(vcpu,interrupt_collection_enabled)) {
+ if (!PSCB(vcpu,incomplete_regframe))
+ PSCB(vcpu,ifs) = regs->cr_ifs;
+ else PSCB(vcpu,incomplete_regframe) = 0;
}
regs->cr_ifs = 0;
return (IA64_NO_FAULT);
IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
{
extern unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr);
- UINT64 pta = PSCB(vcpu).pta;
+ UINT64 pta = PSCB(vcpu,pta);
UINT64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
UINT64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT)-1);
UINT64 Mask = (1L << pta_sz) - 1;
if (VHPT_addr1 == 0xe000000000000000L) {
printf("vcpu_thash: thash unsupported with rr7 @%lx\n",
- PSCB(vcpu).iip);
+ PSCB(vcpu,iip));
return (IA64_ILLOP_FAULT);
}
//verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n",vadr,VHPT_addr);
if ((trp=match_tr(current,vadr)) || (trp=match_dtlb(current,vadr))) {
mask = (1L << trp->ps) - 1;
*padr = ((trp->ppn << 12) & ~mask) | (vadr & mask);
- verbose("vcpu_tpa: addr=%p @%p, successful, padr=%p\n",vadr,PSCB(vcpu).iip,*padr);
+ verbose("vcpu_tpa: addr=%p @%p, successful, padr=%p\n",vadr,PSCB(vcpu,iip),*padr);
return (IA64_NO_FAULT);
}
- verbose("vcpu_tpa addr=%p, @%p, forcing data miss\n",vadr,PSCB(vcpu).iip);
+ verbose("vcpu_tpa addr=%p, @%p, forcing data miss\n",vadr,PSCB(vcpu,iip));
return vcpu_force_data_miss(vcpu, vadr);
}
{
REGS *regs = vcpu_regs(vcpu);
unsigned long *r = ®s->r16;
- unsigned long *b0 = &PSCB(vcpu).bank0_regs[0];
- unsigned long *b1 = &PSCB(vcpu).bank1_regs[0];
+ unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
+ unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
int i;
- if (PSCB(vcpu).banknum) {
+ if (PSCB(vcpu,banknum)) {
for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
- PSCB(vcpu).banknum = 0;
+ PSCB(vcpu,banknum) = 0;
}
return (IA64_NO_FAULT);
}
{
REGS *regs = vcpu_regs(vcpu);
unsigned long *r = ®s->r16;
- unsigned long *b0 = &PSCB(vcpu).bank0_regs[0];
- unsigned long *b1 = &PSCB(vcpu).bank1_regs[0];
+ unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
+ unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
int i;
- if (!PSCB(vcpu).banknum) {
+ if (!PSCB(vcpu,banknum)) {
for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
- PSCB(vcpu).banknum = 1;
+ PSCB(vcpu,banknum) = 1;
}
return (IA64_NO_FAULT);
}
ia64_rr rr;
- rr.rrval = PSCB(vcpu).rrs[vadr>>61];
+ rr.rrval = PSCB(vcpu,rrs[vadr)>>61];
return(rr.ve);
}
ia64_rr rr;
- rr.rrval = PSCB(vcpu).rrs[vadr>>61];
+ rr.rrval = PSCB(vcpu,rrs[vadr)>>61];
return(rr.ps);
}
ia64_rr rr;
- rr.rrval = PSCB(vcpu).rrs[vadr>>61];
+ rr.rrval = PSCB(vcpu,rrs[vadr)>>61];
return(rr.rid);
}
IA64FAULT vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
extern void set_one_rr(UINT64, UINT64);
- PSCB(vcpu).rrs[reg>>61] = val;
+ PSCB(vcpu,rrs[reg)>>61] = val;
// warning: set_one_rr() does it "live"
set_one_rr(reg,val);
return (IA64_NO_FAULT);
IA64FAULT vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
- UINT val = PSCB(vcpu).rrs[reg>>61];
+ UINT val = PSCB(vcpu,rrs[reg)>>61];
*pval = val;
return (IA64_NO_FAULT);
}
{
TR_ENTRY *trp;
- trp = vcpu_match_tr_entry(vcpu,vcpu->shared_info->arch.dtrs,ifa,NDTRS);
+ trp = vcpu_match_tr_entry(vcpu,vcpu->vcpu_info->arch.dtrs,ifa,NDTRS);
if (trp) return trp;
- trp = vcpu_match_tr_entry(vcpu,vcpu->shared_info->arch.itrs,ifa,NITRS);
+ trp = vcpu_match_tr_entry(vcpu,vcpu->vcpu_info->arch.itrs,ifa,NITRS);
if (trp) return trp;
return 0;
}
TR_ENTRY *trp;
if (slot >= NDTRS) return IA64_RSVDREG_FAULT;
- trp = &PSCB(vcpu).dtrs[slot];
+ trp = &PSCB(vcpu,dtrs[slot]);
vcpu_set_tr_entry(trp,pte,itir,ifa);
return IA64_NO_FAULT;
}
TR_ENTRY *trp;
if (slot >= NITRS) return IA64_RSVDREG_FAULT;
- trp = &PSCB(vcpu).itrs[slot];
+ trp = &PSCB(vcpu,itrs[slot]);
vcpu_set_tr_entry(trp,pte,itir,ifa);
return IA64_NO_FAULT;
}
void foobar(void) { /*vcpu_verbose = 1;*/ }
-extern VCPU *dom0;
+extern struct domain *dom0;
void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte, UINT64 logps)
{
ia64_itc(IorD,vaddr,pte,ps); // FIXME: look for bigger mappings
ia64_set_psr(psr);
// ia64_srlz_i(); // no srls req'd, will rfi later
- if (IorD & 0x1) vcpu_set_tr_entry(&PSCB(vcpu).itlb,pte,logps<<2,vaddr);
- if (IorD & 0x2) vcpu_set_tr_entry(&PSCB(vcpu).dtlb,pte,logps<<2,vaddr);
+ if (IorD & 0x1) vcpu_set_tr_entry(&PSCB(vcpu,itlb),pte,logps<<2,vaddr);
+ if (IorD & 0x2) vcpu_set_tr_entry(&PSCB(vcpu,dtlb),pte,logps<<2,vaddr);
}
TR_ENTRY *match_dtlb(VCPU *vcpu, unsigned long ifa)
{
- return vcpu_match_tr_entry(vcpu,&vcpu->shared_info->arch.dtlb,ifa,1);
+ return vcpu_match_tr_entry(vcpu,&vcpu->vcpu_info->arch.dtlb,ifa,1);
}
IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
{
UINT64 mpaddr;
IA64FAULT fault;
- unsigned long lookup_domain_mpa(VCPU *,unsigned long);
+ unsigned long lookup_domain_mpa(struct domain *,unsigned long);
unsigned long pteval, dom_imva;
fault = vcpu_tpa(vcpu, vadr, &mpaddr);
printk("vcpu_fc: bad dom0 mpaddr %p!\n",mpaddr);
}
}
- pteval = lookup_domain_mpa(vcpu,mpaddr);
+ pteval = lookup_domain_mpa(vcpu->domain,mpaddr);
if (pteval) {
dom_imva = __va(pteval & _PFN_MASK);
ia64_fc(dom_imva);
// FIXME: When VHPT is in place, flush that too!
local_flush_tlb_all();
// just invalidate the "whole" tlb
- vcpu_purge_tr_entry(&PSCB(vcpu).dtlb);
- vcpu_purge_tr_entry(&PSCB(vcpu).itlb);
+ vcpu_purge_tr_entry(&PSCB(vcpu,dtlb));
+ vcpu_purge_tr_entry(&PSCB(vcpu,itlb));
return IA64_NO_FAULT;
}
// if (Xen address) return(IA64_ILLOP_FAULT);
// FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
- vcpu_purge_tr_entry(&PSCB(vcpu).dtlb);
- vcpu_purge_tr_entry(&PSCB(vcpu).itlb);
+ vcpu_purge_tr_entry(&PSCB(vcpu,dtlb));
+ vcpu_purge_tr_entry(&PSCB(vcpu,itlb));
return IA64_NO_FAULT;
}
void vcpu_set_regs(VCPU *vcpu, REGS *regs)
{
- vcpu->regs = regs;
+ vcpu->arch.regs = regs;
}
--- /dev/null
+/*
+ * Initialize VHPT support.
+ *
+ * Copyright (C) 2004 Hewlett-Packard Co
+ * Dan Magenheimer <dan.magenheimer@hp.com>
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/pgalloc.h>
+#include <asm/page.h>
+#include <asm/dma.h>
+#include <asm/vhpt.h>
+
+unsigned long vhpt_paddr, vhpt_pend, vhpt_pte;
+
+void vhpt_flush(void)
+{
+ struct vhpt_lf_entry *v = (void *)VHPT_ADDR;
+ int i;
+
+ for (i = 0; i < VHPT_CACHE_NUM_ENTRIES; i++, v++) {
+ v->itir = 0;
+ v->CChain = 0;
+ v->page_flags = 0;
+ v->ti_tag = INVALID_TI_TAG;
+ }
+ // initialize cache too???
+}
+
+void vhpt_map(void)
+{
+ unsigned long psr;
+
+ psr = ia64_clear_ic();
+ ia64_itr(0x2, IA64_TR_VHPT, VHPT_ADDR, vhpt_pte, VHPT_SIZE_LOG2);
+ ia64_set_psr(psr);
+ ia64_srlz_i();
+}
+
+void vhpt_init(void)
+{
+ unsigned long vhpt_total_size, vhpt_alignment, vhpt_imva;
+ extern unsigned long __alloc_bootmem(unsigned long, unsigned long, unsigned long);
+#if !VHPT_ENABLED
+ return;
+#endif
+ // allocate a huge chunk of physical memory.... how???
+ vhpt_total_size = 1 << VHPT_SIZE_LOG2; // 4MB, 16MB, 64MB, or 256MB
+ vhpt_alignment = 1 << VHPT_SIZE_LOG2; // 4MB, 16MB, 64MB, or 256MB
+ printf("vhpt_init: vhpt size=%p, align=%p\n",vhpt_total_size,vhpt_alignment);
+ vhpt_imva = __alloc_bootmem(vhpt_total_size,vhpt_alignment,
+ __pa(MAX_DMA_ADDRESS));
+ if (!vhpt_imva) {
+ printf("vhpt_init: can't allocate VHPT!\n");
+ while(1);
+ }
+ vhpt_paddr = __pa(vhpt_imva);
+ vhpt_pend = vhpt_paddr + vhpt_total_size - 1;
+ printf("vhpt_init: vhpt paddr=%p, end=%p\n",vhpt_paddr,vhpt_pend);
+ vhpt_pte = pte_val(pfn_pte(vhpt_paddr >> PAGE_SHIFT, PAGE_KERNEL));
+ vhpt_map();
+ ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
+ VHPT_ENABLED);
+ vhpt_flush();
+}
+
#include <asm/pgtable.h>
#include <asm/vhpt.h>
+#if 0
+// FIXME: there's gotta be a better way...
+// ski and spaski are different... moved to xenmisc.c
#define RunningOnHpSki(rx,ry,pn) \
addl rx = 2, r0; \
addl ry = 3, r0; \
;; \
(pn) movl rx = 0x7000004 ; \
;; \
- (pn) cmp.eq pn,p0 = ry, rx; \
+ (pn) cmp.ge pn,p0 = ry, rx; \
;;
//int platform_is_hp_ski(void)
(p8) mov r8 = 1
br.ret.sptk.many b0
END(platform_is_hp_ski)
+#endif
// Change rr7 to the passed value while ensuring
-// Xen is mapped into the new region
+// Xen is mapped into the new region.
+// in0: new rr7 value
+// in1: Xen virtual address of shared info (to be pinned)
#define PSR_BITS_TO_CLEAR \
(IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT | \
IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \
GLOBAL_ENTRY(ia64_new_rr7)
// not sure this unwind statement is correct...
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
- alloc loc1 = ar.pfs, 1, 7, 0, 0
+ alloc loc1 = ar.pfs, 2, 7, 0, 0
1: {
mov r28 = in0 // copy procedure index
mov r8 = ip // save ip to compute branch
tpa loc6=loc6 // grab this BEFORE changing rr7
;;
#endif
- movl loc5=SHAREDINFO_ADDR
+ mov loc5=in1
;;
tpa loc5=loc5 // grab this BEFORE changing rr7
;;
END(__get_domain_bundle)
GLOBAL_ENTRY(dorfirfi)
-#define SI_CR_IIP_OFFSET 0x150
-#define SI_CR_IPSR_OFFSET 0x148
-#define SI_CR_IFS_OFFSET 0x158
+#define SI_CR_IIP_OFFSET 0x10
+#define SI_CR_IPSR_OFFSET 0x08
+#define SI_CR_IFS_OFFSET 0x18
movl r16 = SHAREDINFO_ADDR+SI_CR_IIP_OFFSET
movl r17 = SHAREDINFO_ADDR+SI_CR_IPSR_OFFSET
movl r18 = SHAREDINFO_ADDR+SI_CR_IFS_OFFSET
--- /dev/null
+/*
+ * Xen misc
+ *
+ * Functions/decls that are/may be needed to link with Xen because
+ * of x86 dependencies
+ *
+ * Copyright (C) 2004 Hewlett-Packard Co.
+ * Dan Magenheimer (dan.magenheimer@hp.com)
+ *
+ */
+
+#include <linux/config.h>
+#include <xen/sched.h>
+#include <linux/efi.h>
+#include <asm/processor.h>
+#include <xen/serial.h>
+#include <asm/io.h>
+
+efi_memory_desc_t ia64_efi_io_md;
+EXPORT_SYMBOL(ia64_efi_io_md);
+unsigned long wait_init_idle;
+int phys_proc_id[NR_CPUS];
+unsigned long loops_per_jiffy = (1<<12); // from linux/init/main.c
+
+unsigned int watchdog_on = 0; // from arch/x86/nmi.c ?!?
+
+void unw_init(void) { printf("unw_init() skipped (NEED FOR KERNEL UNWIND)\n"); }
+void ia64_mca_init(void) { printf("ia64_mca_init() skipped (Machine check abort handling)\n"); }
+void hpsim_setup(char **x) { printf("hpsim_setup() skipped (MAY NEED FOR CONSOLE INPUT!!!)\n"); }
+
+long
+is_platform_hp_ski(void)
+{
+ int i;
+ long cpuid[6];
+
+ for (i = 0; i < 5; ++i)
+ cpuid[i] = ia64_get_cpuid(i);
+ if ((cpuid[0] & 0xff) != 'H') return 0;
+ if ((cpuid[3] & 0xff) != 0x4) return 0;
+ if (((cpuid[3] >> 8) & 0xff) != 0x0) return 0;
+ if (((cpuid[3] >> 16) & 0xff) != 0x0) return 0;
+ if (((cpuid[3] >> 24) & 0x7) != 0x7) return 0;
+ return 1;
+}
+
+long
+platform_is_hp_ski(void)
+{
+ extern long running_on_sim;
+ return running_on_sim;
+}
+
+/* calls in xen/common code that are unused on ia64 */
+void synchronise_pagetables(unsigned long cpu_mask) { return; }
+
+int grant_table_create(struct domain *d) { return 0; }
+void grant_table_destroy(struct domain *d)
+{
+ printf("grant_table_destroy: domain_destruct not tested!!!\n");
+ printf("grant_table_destroy: ensure atomic_* calls work in domain_destruct!!\n");
+ dummy();
+ return;
+}
+
+struct pt_regs *get_execution_context(void) { return ia64_task_regs(current); }
+
+void cleanup_writable_pagetable(struct domain *d, int what) { return; }
+
+///////////////////////////////
+// from arch/x86/apic.c
+///////////////////////////////
+
+int reprogram_ac_timer(s_time_t timeout)
+{
+ return 1;
+}
+
+///////////////////////////////
+// from arch/x86/dompage.c
+///////////////////////////////
+
+struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order)
+{
+ printf("alloc_domheap_pages: called, not implemented\n");
+}
+
+void free_domheap_pages(struct pfn_info *pg, unsigned int order)
+{
+ printf("free_domheap_pages: called, not implemented\n");
+}
+
+
+unsigned long avail_domheap_pages(void)
+{
+ printf("avail_domheap_pages: called, not implemented\n");
+ return 0;
+}
+
+///////////////////////////////
+// from arch/x86/flushtlb.c
+///////////////////////////////
+
+u32 tlbflush_clock;
+u32 tlbflush_time[NR_CPUS];
+
+///////////////////////////////
+// from arch/x86/memory.c
+///////////////////////////////
+
+void init_percpu_info(void)
+{
+ dummy();
+ //memset(percpu_info, 0, sizeof(percpu_info));
+}
+
+void free_page_type(struct pfn_info *page, unsigned int type)
+{
+ dummy();
+}
+
+///////////////////////////////
+// from arch/x86/pci.c
+///////////////////////////////
+
+int
+pcibios_prep_mwi (struct pci_dev *dev)
+{
+ dummy();
+}
+
+///////////////////////////////
+// from arch/x86/pci-irq.c
+///////////////////////////////
+
+void pcibios_enable_irq(struct pci_dev *dev)
+{
+ dummy();
+}
+
+///////////////////////////////
+// from arch/ia64/pci-pc.c
+///////////////////////////////
+
+#include <xen/pci.h>
+
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ dummy();
+ return 0;
+}
+
+int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value) = NULL;
+int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value) = NULL;
+
+//struct pci_fixup pcibios_fixups[] = { { 0 } };
+struct pci_fixup pcibios_fixups[] = { { 0 } };
+
+void
+pcibios_align_resource(void *data, struct resource *res,
+ unsigned long size, unsigned long align)
+{
+ dummy();
+}
+
+void
+pcibios_update_resource(struct pci_dev *dev, struct resource *root,
+ struct resource *res, int resource)
+{
+ dummy();
+}
+
+void __devinit pcibios_fixup_bus(struct pci_bus *b)
+{
+ dummy();
+}
+
+void __init pcibios_init(void)
+{
+ dummy();
+}
+
+char * __devinit pcibios_setup(char *str)
+{
+ dummy();
+ return 0;
+}
+
+///////////////////////////////
+// from arch/ia64/traps.c
+///////////////////////////////
+
+void show_registers(struct pt_regs *regs)
+{
+ dummy();
+}
+
+///////////////////////////////
+// from common/keyhandler.c
+///////////////////////////////
+void dump_pageframe_info(struct domain *d)
+{
+ printk("dump_pageframe_info not implemented\n");
+}
+
+///////////////////////////////
+// from drivers/char/serial.c
+///////////////////////////////
+
+#include <asm/hpsim_ssc.h>
+
+int
+ia64_serial_putc(unsigned char c)
+{
+ if (platform_is_hp_ski()) {
+ ia64_ssc(c, 0, 0, 0, SSC_PUTCHAR);
+ }
+ else {
+// this is tested on HP Longs Peak platform... it
+// will probably work on other Itanium platforms as
+// well, but undoubtedly needs work
+ longs_peak_putc(c);
+ }
+ return 1;
+}
+
+///////////////////////////////
+// from common/physdev.c
+///////////////////////////////
+void
+physdev_init_dom0(struct domain *d)
+{
+}
+
+int
+physdev_pci_access_modify(domid_t id, int bus, int dev, int func, int enable)
+{
+ return -EINVAL;
+}
--- /dev/null
+/******************************************************************************
+ * kernel.c
+ *
+ * This file should contain architecture-independent bootstrap and low-level
+ * help routines. It's a bit x86/PC specific right now!
+ *
+ * Copyright (c) 2002-2003 K A Fraser
+ */
+
+//#include <stdarg.h>
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <xen/errno.h>
+//#include <xen/spinlock.h>
+#include <xen/multiboot.h>
+#include <xen/sched.h>
+#include <xen/mm.h>
+//#include <xen/delay.h>
+#include <xen/compile.h>
+//#include <xen/console.h>
+//#include <xen/serial.h>
+#include <xen/trace.h>
+//#include <asm/shadow.h>
+//#include <asm/io.h>
+//#include <asm/uaccess.h>
+//#include <asm/domain_page.h>
+//#include <public/dom0_ops.h>
+
+unsigned long xenheap_phys_end;
+
+struct exec_domain *idle_task[NR_CPUS] = { &idle0_exec_domain };
+
+xmem_cache_t *domain_struct_cachep;
+#ifdef IA64
+kmem_cache_t *mm_cachep;
+kmem_cache_t *vm_area_cachep;
+#ifdef CLONE_DOMAIN0
+struct domain *clones[CLONE_DOMAIN0];
+#endif
+#endif
+extern struct domain *dom0;
+extern unsigned long domain0_ready;
+
+#ifndef IA64
+vm_assist_info_t vm_assist_info[MAX_VMASST_TYPE + 1];
+#endif
+
+#ifndef IA64
+struct e820entry {
+ unsigned long addr_lo, addr_hi; /* start of memory segment */
+ unsigned long size_lo, size_hi; /* size of memory segment */
+ unsigned long type; /* type of memory segment */
+};
+#endif
+
+void start_of_day(void);
+
+/* opt_console: comma-separated list of console outputs. */
+#ifdef IA64
+unsigned char opt_console[30] = "com1";
+#else
+unsigned char opt_console[30] = "com1,vga";
+#endif
+/* opt_conswitch: a character pair controlling console switching. */
+/* Char 1: CTRL+<char1> is used to switch console input between Xen and DOM0 */
+/* Char 2: If this character is 'x', then do not auto-switch to DOM0 when it */
+/* boots. Any other value, or omitting the char, enables auto-switch */
+unsigned char opt_conswitch[5] = "a"; /* NB. '`' would disable switching. */
+/* opt_com[12]: Config serial port with a string <baud>,DPS,<io-base>,<irq>. */
+unsigned char opt_com1[30] = "", opt_com2[30] = "";
+/* opt_dom0_mem: Kilobytes of memory allocated to domain 0. */
+unsigned int opt_dom0_mem = 16000;
+/* opt_noht: If true, Hyperthreading is ignored. */
+int opt_noht=0;
+/* opt_noacpi: If true, ACPI tables are not parsed. */
+int opt_noacpi=0;
+/* opt_nosmp: If true, secondary processors are ignored. */
+int opt_nosmp=0;
+/* opt_noreboot: If true, machine will need manual reset on error. */
+int opt_noreboot=0;
+/* opt_ignorebiostables: If true, ACPI and MP tables are ignored. */
+/* NB. This flag implies 'nosmp' and 'noacpi'. */
+int opt_ignorebiostables=0;
+/* opt_watchdog: If true, run a watchdog NMI on each processor. */
+int opt_watchdog=0;
+/* opt_pdb: Name of serial port for Xen pervasive debugger (and enable pdb) */
+unsigned char opt_pdb[10] = "none";
+/* opt_tbuf_size: trace buffer size (in pages) */
+unsigned int opt_tbuf_size = 10;
+/* opt_sched: scheduler - default to Borrowed Virtual Time */
+char opt_sched[10] = "bvt";
+/* opt_physdev_dom0_hide: list of PCI slots to hide from domain 0. */
+/* Format is '(%02x:%02x.%1x)(%02x:%02x.%1x)' and so on. */
+char opt_physdev_dom0_hide[200] = "";
+/* opt_leveltrigger, opt_edgetrigger: Force an IO-APIC-routed IRQ to be */
+/* level- or edge-triggered. */
+/* Example: 'leveltrigger=4,5,6,20 edgetrigger=21'. */
+char opt_leveltrigger[30] = "", opt_edgetrigger[30] = "";
+/*
+ * opt_xenheap_megabytes: Size of Xen heap in megabytes, excluding the
+ * pfn_info table and allocation bitmap.
+ */
+unsigned int opt_xenheap_megabytes = XENHEAP_DEFAULT_MB;
+/*
+ * opt_nmi: one of 'ignore', 'dom0', or 'fatal'.
+ * fatal: Xen prints diagnostic message and then hangs.
+ * dom0: The NMI is virtualised to DOM0.
+ * ignore: The NMI error is cleared and ignored.
+ */
+#ifdef NDEBUG
+char opt_nmi[10] = "dom0";
+#else
+char opt_nmi[10] = "fatal";
+#endif
+/*
+ * Comma-separated list of hexadecimal page numbers containing bad bytes.
+ * e.g. 'badpage=0x3f45,0x8a321'.
+ */
+char opt_badpage[100] = "";
+
+extern long running_on_sim;
+
+void cmain(multiboot_info_t *mbi)
+{
+ unsigned long max_page;
+ unsigned char *cmdline;
+ module_t *mod = (module_t *)__va(mbi->mods_addr);
+ void *heap_start;
+ int i;
+ unsigned long max_mem;
+ unsigned long dom0_memory_start, dom0_memory_end;
+ unsigned long initial_images_start, initial_images_end;
+
+
+ running_on_sim = is_platform_hp_ski();
+
+ /* Parse the command-line options. */
+ cmdline = (unsigned char *)(mbi->cmdline ? __va(mbi->cmdline) : NULL);
+ cmdline_parse(cmdline);
+
+ /* Must do this early -- e.g., spinlocks rely on get_current(). */
+ set_current(&idle0_exec_domain);
+
+ /* We initialise the serial devices very early so we can get debugging. */
+ serial_init_stage1();
+
+ init_console();
+#if 0
+ /* HELLO WORLD --- start-of-day banner text. */
+ printk(XEN_BANNER);
+ printk(" http://www.cl.cam.ac.uk/netos/xen\n");
+ printk(" University of Cambridge Computer Laboratory\n\n");
+ printk(" Xen version %d.%d%s (%s@%s) (%s) %s\n",
+ XEN_VERSION, XEN_SUBVERSION, XEN_EXTRAVERSION,
+ XEN_COMPILE_BY, XEN_COMPILE_DOMAIN,
+ XEN_COMPILER, XEN_COMPILE_DATE);
+#endif
+#ifndef IA64
+ printk(" Latest ChangeSet: %s\n\n", XEN_CHANGESET);
+#endif
+ set_printk_prefix("(XEN) ");
+
+#ifdef IA64
+ //set_current(&idle0_exec_domain);
+ { char *cmdline;
+ setup_arch(&cmdline);
+ }
+ setup_per_cpu_areas();
+ build_all_zonelists();
+ mem_init();
+ //show_mem(); // call to dump lots of memory info for debug
+#else
+ /* We require memory and module information. */
+ if ( (mbi->flags & 9) != 9 )
+ {
+ printk("FATAL ERROR: Bad flags passed by bootloader: 0x%x\n",
+ (unsigned)mbi->flags);
+ for ( ; ; ) ;
+ }
+
+ if ( mbi->mods_count == 0 )
+ {
+ printk("Require at least one Multiboot module!\n");
+ for ( ; ; ) ;
+ }
+
+ if ( opt_xenheap_megabytes < 4 )
+ {
+ printk("Xen heap size is too small to safely continue!\n");
+ for ( ; ; ) ;
+ }
+
+ xenheap_phys_end = opt_xenheap_megabytes << 20;
+
+ max_mem = max_page = (mbi->mem_upper+1024) >> (PAGE_SHIFT - 10);
+#endif
+
+#if defined(__i386__)
+
+ initial_images_start = DIRECTMAP_PHYS_END;
+ initial_images_end = initial_images_start +
+ (mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
+ if ( initial_images_end > (max_page << PAGE_SHIFT) )
+ {
+ printk("Not enough memory to stash the DOM0 kernel image.\n");
+ for ( ; ; ) ;
+ }
+ memmove((void *)initial_images_start, /* use low mapping */
+ (void *)mod[0].mod_start, /* use low mapping */
+ mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
+
+ if ( opt_xenheap_megabytes > XENHEAP_DEFAULT_MB )
+ {
+ printk("Xen heap size is limited to %dMB - you specified %dMB.\n",
+ XENHEAP_DEFAULT_MB, opt_xenheap_megabytes);
+ for ( ; ; ) ;
+ }
+
+ ASSERT((sizeof(struct pfn_info) << 20) <=
+ (FRAMETABLE_VIRT_END - FRAMETABLE_VIRT_START));
+
+ init_frametable((void *)FRAMETABLE_VIRT_START, max_page);
+
+#elif defined(__x86_64__)
+
+ init_frametable(__va(xenheap_phys_end), max_page);
+
+ initial_images_start = __pa(frame_table) + frame_table_size;
+ initial_images_end = initial_images_start +
+ (mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
+ if ( initial_images_end > (max_page << PAGE_SHIFT) )
+ {
+ printk("Not enough memory to stash the DOM0 kernel image.\n");
+ for ( ; ; ) ;
+ }
+ memmove(__va(initial_images_start),
+ __va(mod[0].mod_start),
+ mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
+
+#endif
+
+#ifndef IA64
+ dom0_memory_start = (initial_images_end + ((4<<20)-1)) & ~((4<<20)-1);
+ dom0_memory_end = dom0_memory_start + (opt_dom0_mem << 10);
+ dom0_memory_end = (dom0_memory_end + PAGE_SIZE - 1) & PAGE_MASK;
+
+ /* Cheesy sanity check: enough memory for DOM0 allocation + some slack? */
+ if ( (dom0_memory_end + (8<<20)) > (max_page << PAGE_SHIFT) )
+ {
+ printk("Not enough memory for DOM0 memory reservation.\n");
+ for ( ; ; ) ;
+ }
+#endif
+
+ printk("Initialised %luMB memory (%lu pages) on a %luMB machine\n",
+ max_page >> (20-PAGE_SHIFT), max_page,
+ max_mem >> (20-PAGE_SHIFT));
+
+#ifndef IA64
+ heap_start = memguard_init(&_end);
+ heap_start = __va(init_heap_allocator(__pa(heap_start), max_page));
+
+ init_xenheap_pages(__pa(heap_start), xenheap_phys_end);
+ printk("Xen heap size is %luKB\n",
+ (xenheap_phys_end-__pa(heap_start))/1024 );
+
+ init_domheap_pages(dom0_memory_end, max_page << PAGE_SHIFT);
+#endif
+
+ /* Initialise the slab allocator. */
+#ifdef IA64
+ kmem_cache_init();
+#else
+ xmem_cache_init();
+ xmem_cache_sizes_init(max_page);
+#endif
+
+ domain_struct_cachep = xmem_cache_create(
+ "domain_cache", sizeof(struct domain),
+ 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+ if ( domain_struct_cachep == NULL )
+ panic("No slab cache for task structs.");
+
+#ifdef IA64
+ // following from proc_caches_init in linux/kernel/fork.c
+ vm_area_cachep = kmem_cache_create("vm_area_struct",
+ sizeof(struct vm_area_struct), 0,
+ SLAB_PANIC, NULL, NULL);
+ mm_cachep = kmem_cache_create("mm_struct",
+ sizeof(struct mm_struct), 0,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+printk("About to call scheduler_init()\n");
+ scheduler_init();
+printk("About to call time_init()\n");
+ time_init();
+printk("About to call ac_timer_init()\n");
+ ac_timer_init();
+// init_xen_time(); ???
+// schedulers_start(); ???
+// do_initcalls(); ???
+#else
+ start_of_day();
+
+ grant_table_init();
+#endif
+
+ /* Create initial domain 0. */
+printk("About to call do_createdomain()\n");
+ dom0 = do_createdomain(0, 0);
+printk("About to call init_idle_task()\n");
+ init_task.domain = &idle0_domain;
+ init_task.processor = 0;
+ init_task.mm = &init_mm;
+// init_task.thread = INIT_THREAD;
+ init_idle_task();
+ //arch_do_createdomain(current);
+#ifdef CLONE_DOMAIN0
+ {
+ int i;
+ for (i = 0; i < CLONE_DOMAIN0; i++) {
+ clones[i] = do_createdomain(i+1, 0);
+ if ( clones[i] == NULL )
+ panic("Error creating domain0 clone %d\n",i);
+ }
+ }
+#endif
+ if ( dom0 == NULL )
+ panic("Error creating domain 0\n");
+
+ set_bit(DF_PRIVILEGED, &dom0->d_flags);
+
+//printk("About to call shadow_mode_init()\n");
+// shadow_mode_init();
+
+ /* Grab the DOM0 command line. Skip past the image name. */
+printk("About to process command line\n");
+#ifndef IA64
+ cmdline = (unsigned char *)(mod[0].string ? __va(mod[0].string) : NULL);
+ if ( cmdline != NULL )
+ {
+ while ( *cmdline == ' ' ) cmdline++;
+ if ( (cmdline = strchr(cmdline, ' ')) != NULL )
+ while ( *cmdline == ' ' ) cmdline++;
+ }
+#endif
+
+ /*
+ * We're going to setup domain0 using the module(s) that we stashed safely
+ * above our heap. The second module, if present, is an initrd ramdisk.
+ */
+#ifdef IA64
+printk("About to call construct_dom0()\n");
+ if ( construct_dom0(dom0, dom0_memory_start, dom0_memory_end,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0) != 0)
+#else
+ if ( construct_dom0(dom0, dom0_memory_start, dom0_memory_end,
+ (char *)initial_images_start,
+ mod[0].mod_end-mod[0].mod_start,
+ (mbi->mods_count == 1) ? 0 :
+ (char *)initial_images_start +
+ (mod[1].mod_start-mod[0].mod_start),
+ (mbi->mods_count == 1) ? 0 :
+ mod[mbi->mods_count-1].mod_end - mod[1].mod_start,
+ cmdline) != 0)
+#endif
+ panic("Could not set up DOM0 guest OS\n");
+#ifdef CLONE_DOMAIN0
+ {
+ int i;
+ for (i = 0; i < CLONE_DOMAIN0; i++) {
+printk("CONSTRUCTING DOMAIN0 CLONE #%d\n",i+1);
+ if ( construct_dom0(clones[i], dom0_memory_start, dom0_memory_end,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0) != 0)
+ panic("Could not set up DOM0 clone %d\n",i);
+ }
+ }
+#endif
+
+ /* The stash space for the initial kernel image can now be freed up. */
+#ifndef IA64
+ init_domheap_pages(__pa(frame_table) + frame_table_size,
+ dom0_memory_start);
+
+ scrub_heap_pages();
+#endif
+
+printk("About to call init_trace_bufs()\n");
+ init_trace_bufs();
+
+ /* Give up the VGA console if DOM0 is configured to grab it. */
+#ifndef IA64
+ console_endboot(cmdline && strstr(cmdline, "tty0"));
+#endif
+
+ domain_unpause_by_systemcontroller(current);
+#ifdef CLONE_DOMAIN0
+ {
+ int i;
+ for (i = 0; i < CLONE_DOMAIN0; i++)
+ domain_unpause_by_systemcontroller(clones[i]);
+ }
+#endif
+ domain_unpause_by_systemcontroller(dom0);
+ domain0_ready = 1;
+printk("About to call startup_cpu_idle_loop()\n");
+ startup_cpu_idle_loop();
+}
void do_debug_key(unsigned char key, struct xen_regs *regs)
{
(void)debugger_trap_fatal(0xf001, regs);
- asm volatile ("nop"); /* Prevent the compiler doing tail call
+ nop(); /* Prevent the compiler doing tail call
optimisation, as that confuses xendbg a
bit. */
}
acpi_status
acpi_ds_call_control_method (
- struct acpi_thread_state *thread,
+ struct acpi_thread_state *acpi_thread,
struct acpi_walk_state *walk_state,
union acpi_parse_object *op);
acpi_owner_id owner_id,
union acpi_parse_object *origin,
union acpi_operand_object *mth_desc,
- struct acpi_thread_state *thread);
+ struct acpi_thread_state *acpi_thread);
acpi_status
acpi_ds_init_aml_walk (
struct acpi_walk_state *
acpi_ds_pop_walk_state (
- struct acpi_thread_state *thread);
+ struct acpi_thread_state *acpi_thread);
void
acpi_ds_push_walk_state (
struct acpi_walk_state *walk_state,
- struct acpi_thread_state *thread);
+ struct acpi_thread_state *acpi_thread);
acpi_status
acpi_ds_result_stack_pop (
struct acpi_walk_state *
acpi_ds_get_current_walk_state (
- struct acpi_thread_state *thread);
+ struct acpi_thread_state *acpi_thread);
void
acpi_ds_delete_walk_state_cache (
void
acpi_ex_release_all_mutexes (
- struct acpi_thread_state *thread);
+ struct acpi_thread_state *acpi_thread);
void
acpi_ex_unlink_mutex (
void
acpi_ex_link_mutex (
union acpi_operand_object *obj_desc,
- struct acpi_thread_state *thread);
+ struct acpi_thread_state *acpi_thread);
/*
* exprep - ACPI AML (p-code) execution - prep utilities
struct acpi_scope_state scope;
struct acpi_pscope_state parse_scope;
struct acpi_pkg_state pkg;
- struct acpi_thread_state thread;
+ struct acpi_thread_state acpi_thread;
struct acpi_result_values results;
struct acpi_notify_info notify;
};
union acpi_parse_object *next_op; /* next op to be processed */
acpi_parse_downwards descending_callback;
acpi_parse_upwards ascending_callback;
- struct acpi_thread_state *thread;
+ struct acpi_thread_state *acpi_thread;
struct acpi_walk_state *next; /* Next walk_state in list */
};
--- /dev/null
+#undef CLONE_DOMAIN0
+// manufactured from component pieces
+
+// defined in linux/arch/ia64/defconfig
+#define CONFIG_IA64_HP_SIM
+#define CONFIG_IA64_L1_CACHE_SHIFT 7
+// needed by include/asm-ia64/page.h
+#define CONFIG_IA64_PAGE_SIZE_16KB // 4KB doesn't work?!?
+#define CONFIG_IA64_GRANULE_16MB
+// needed in arch/ia64/setup.c to reserve memory for domain0
+#define CONFIG_BLK_DEV_INITRD
+
+#ifndef __ASSEMBLY__
+
+// can't find where this typedef was before?!?
+// needed by include/asm-ia64/processor.h (and other places)
+typedef int pid_t;
+
+// from include/linux/kernel.h
+#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
+
+//////////////////////////////////////
+
+// FIXME: generated automatically into offsets.h??
+#define IA64_TASK_SIZE 0 // this probably needs to be fixed
+//#define IA64_TASK_SIZE sizeof(struct task_struct)
+
+#define FASTCALL(x) x // see linux/include/linux/linkage.h
+#define fastcall // " "
+
+// from linux/include/linux/types.h
+#define BITS_TO_LONGS(bits) \
+ (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
+#define DECLARE_BITMAP(name,bits) \
+ unsigned long name[BITS_TO_LONGS(bits)]
+#define CLEAR_BITMAP(name,bits) \
+ memset(name, 0, BITS_TO_LONGS(bits)*sizeof(unsigned long))
+
+// from linux/include/linux/compiler.h
+#define __user
+
+// FIXME?: x86-ism used in xen/mm.h
+#define LOCK_PREFIX
+
+// from linux/include/linux/mm.h
+extern struct page *mem_map;
+
+// defined in include/asm-x86/mm.h, not really used for ia64
+typedef struct {
+ void (*enable)(struct domain *p);
+ void (*disable)(struct domain *p);
+} vm_assist_info_t;
+extern vm_assist_info_t vm_assist_info[];
+
+// xen/include/asm/config.h
+extern char _end[]; /* standard ELF symbol */
+
+// linux/include/linux/compiler.h
+#define __attribute_const__
+
+// xen/include/asm/config.h
+#define HZ 100
+// leave SMP for a later time
+#define NR_CPUS 1
+//#define NR_CPUS 16
+//#define CONFIG_NR_CPUS 16
+#define barrier() __asm__ __volatile__("": : :"memory")
+
+///////////////////////////////////////////////////////////////
+// xen/include/asm/config.h
+#define XENHEAP_DEFAULT_MB (16)
+#define ELFSIZE 64
+
+///////////////////////////////////////////////////////////////
+
+// get rid of difficult circular include dependency
+#define CMPXCHG_BUGCHECK(v)
+#define CMPXCHG_BUGCHECK_DECL
+
+// from include/asm-ia64/smp.h
+#ifdef CONFIG_SMP
+#error "Lots of things to fix to enable CONFIG_SMP!"
+#endif
+#define get_cpu() 0
+#define put_cpu() do {} while(0)
+
+// from linux/include/linux/mm.h
+struct page;
+
+// function calls; see decl in xen/include/xen/sched.h
+#undef free_task_struct
+#undef alloc_task_struct
+
+// initial task has a different name in Xen
+//#define idle0_task init_task
+#define idle0_exec_domain init_task
+
+// avoid redefining task_t in asm/thread_info.h
+#define task_t struct domain
+
+// linux/include/asm-ia64/machvec.h (linux/arch/ia64/lib/io.c)
+#define platform_inb __ia64_inb
+#define platform_inw __ia64_inw
+#define platform_inl __ia64_inl
+#define platform_outb __ia64_outb
+#define platform_outw __ia64_outw
+#define platform_outl __ia64_outl
+
+// FIXME: This just overrides a use in a typedef (not allowed in ia64,
+// or maybe just in older gcc's?) used in ac_timer.c but should be OK
+// (and indeed is probably required!) elsewhere
+#undef __cacheline_aligned
+#undef ____cacheline_aligned
+#undef ____cacheline_aligned_in_smp
+#define __cacheline_aligned
+#define ____cacheline_aligned
+#define ____cacheline_aligned_in_smp
+
+#include "asm/types.h" // for u64
+struct device {
+#if 0
+ struct list_head node; /* node in sibling list */
+ struct list_head bus_list; /* node in bus's list */
+ struct list_head driver_list;
+ struct list_head children;
+ struct device * parent;
+
+ struct kobject kobj;
+ char bus_id[BUS_ID_SIZE]; /* position on parent bus */
+
+ struct bus_type * bus; /* type of bus device is on */
+ struct device_driver *driver; /* which driver has allocated this
+ device */
+ void *driver_data; /* data private to the driver */
+ void *platform_data; /* Platform specific data (e.g. ACPI,
+ BIOS data relevant to device) */
+ struct dev_pm_info power;
+ u32 power_state; /* Current operating state. In
+ ACPI-speak, this is D0-D3, D0
+ being fully functional, and D3
+ being off. */
+
+ unsigned char *saved_state; /* saved device state */
+ u32 detach_state; /* State to enter when device is
+ detached from its driver. */
+
+#endif
+ u64 *dma_mask; /* dma mask (if dma'able device) */
+#if 0
+ struct list_head dma_pools; /* dma pools (if dma'ble) */
+
+ void (*release)(struct device * dev);
+#endif
+};
+
+// from linux/include/linux/pci.h
+struct pci_bus_region {
+ unsigned long start;
+ unsigned long end;
+};
+
+// defined (why?) in include/asm-i386/processor.h
+// used in common/physdev.c
+#define IO_BITMAP_SIZE 32
+#define IO_BITMAP_BYTES (IO_BITMAP_SIZE * 4)
+
+#define printk printf
+
+#define __ARCH_HAS_SLAB_ALLOCATOR // see include/xen/slab.h
+#define xmem_cache_t kmem_cache_t
+#define xmem_cache_alloc(a) kmem_cache_alloc(a,GFP_KERNEL)
+#define xmem_cache_free(a,b) kmem_cache_free(a,b)
+#define xmem_cache_create kmem_cache_create
+#define xmalloc(_type) kmalloc(sizeof(_type),GFP_KERNEL)
+#define xmalloc_array(_type,_num) kmalloc(sizeof(_type)*_num,GFP_KERNEL)
+#define xfree(a) kfree(a)
+
+#undef __ARCH_IRQ_STAT
+
+#define find_first_set_bit(x) (ffs(x)-1) // FIXME: Is this right???
+
+// from include/asm-x86/*/uaccess.h
+#define array_access_ok(type,addr,count,size) \
+ (likely(sizeof(count) <= 4) /* disallow 64-bit counts */ && \
+ access_ok(type,addr,count*size))
+
+// see drivers/char/serial.c
+#define arch_serial_putc(uart,c) ia64_serial_putc(c)
+// without this, uart_config_stageX does outb's which are non-portable
+#define NO_UART_CONFIG_OK
+
+// see drivers/char/console.c
+#define OPT_CONSOLE_STR "com1"
+
+#define __attribute_used__ __attribute__ ((unused))
+
+// see include/asm-x86/atomic.h (different from standard linux)
+#define _atomic_set(v,i) (((v).counter) = (i))
+#define _atomic_read(v) ((v).counter)
+// FIXME following needs work
+#define atomic_compareandswap(old, new, v) old
+
+// x86 typedef still used in sched.h, may go away later
+//typedef unsigned long l1_pgentry_t;
+
+// removed from include/xen/types.h (why?)
+typedef unsigned long uint64_t;
+typedef unsigned int uint32_t;
+
+// see include/asm-ia64/mm.h, handle remaining pfn_info uses until gone
+#define pfn_info page
+
+// see common/keyhandler.c
+#define nop() asm volatile ("nop 0")
+
+#define ARCH_HAS_EXEC_DOMAIN_MM_PTR
+
+// see arch/x86/nmi.c !?!?
+extern unsigned int watchdog_on;
+
+// xen/include/asm/config.h
+/******************************************************************************
+ * config.h
+ *
+ * A Linux-style configuration list.
+ */
+
+#ifndef __XEN_IA64_CONFIG_H__
+#define __XEN_IA64_CONFIG_H__
+
+#undef CONFIG_X86
+
+//#define CONFIG_SMP 1
+//#define CONFIG_NR_CPUS 2
+//leave SMP for a later time
+#undef CONFIG_SMP
+#undef CONFIG_X86_LOCAL_APIC
+#undef CONFIG_X86_IO_APIC
+#undef CONFIG_X86_L1_CACHE_SHIFT
+
+// this needs to be on to run on hp zx1 with more than 4GB
+// it is hacked around for now though
+//#define CONFIG_VIRTUAL_MEM_MAP
+
+//#ifndef CONFIG_IA64_HP_SIM
+// looks like this is hard to turn off for Xen
+#define CONFIG_ACPI 1
+#define CONFIG_ACPI_BOOT 1
+//#endif
+
+#define CONFIG_PCI 1
+#define CONFIG_PCI_BIOS 1
+#define CONFIG_PCI_DIRECT 1
+
+#define CONFIG_XEN_ATTENTION_KEY 1
+#endif /* __ASSEMBLY__ */
+#endif /* __XEN_IA64_CONFIG_H__ */
+
+// FOLLOWING ADDED FOR XEN POST-NGIO and/or LINUX 2.6.7
+
+// following derived from linux/include/linux/compiler-gcc3.h
+// problem because xen (over?)simplifies include/xen/compiler.h
+// NOTE(review): '__GNUC_MAJOR' in the #if below is not a predefined GCC
+// macro (GCC defines '__GNUC__'); as written the undefined identifier
+// evaluates to 0 in the preprocessor -- presumably '__GNUC__' was intended;
+// confirm against linux/include/linux/compiler-gcc3.h.
+#if __GNUC_MAJOR < 3 || __GNUC_MINOR__ >= 3
+# define __attribute_used__ __attribute__((__used__))
+#else
+# define __attribute_used__ __attribute__((__unused__))
+#endif
--- /dev/null
+/******************************************************************************
+ * asm/debugger.h
+ *
+ * Generic hooks into arch-dependent Xen.
+ *
+ * Each debugger should define two functions here:
+ *
+ * 1. debugger_trap_entry():
+ * Called at start of any synchronous fault or trap, before any other work
+ * is done. The idea is that if your debugger deliberately caused the trap
+ * (e.g. to implement breakpoints or data watchpoints) then you can take
+ * appropriate action and return a non-zero value to cause early exit from
+ * the trap function.
+ *
+ * 2. debugger_trap_fatal():
+ * Called when Xen is about to give up and crash. Typically you will use this
+ * hook to drop into a debug session. It can also be used to hook off
+ * deliberately caused traps (which you then handle and return non-zero)
+ * but really these should be hooked off 'debugger_trap_entry'.
+ */
+
+#ifndef __ASM_DEBUGGER_H__
+#define __ASM_DEBUGGER_H__
+
+/* The main trap handlers use these helper macros which include early bail. */
+static inline int debugger_trap_entry(
+ unsigned int vector, struct xen_regs *regs)
+{
+ return 0;
+}
+
+static inline int debugger_trap_fatal(
+ unsigned int vector, struct xen_regs *regs)
+{
+ return 0;
+}
+
+#define debugger_trap_immediate() do {} while(0)
+
+#endif /* __ASM_DEBUGGER_H__ */
--- /dev/null
+/*
+ * Xen domain firmware emulation
+ *
+ * Copyright (C) 2004 Hewlett-Packard Co
+ * Dan Magenheimer (dan.magenheimer@hp.com)
+ */
+
+extern unsigned long dom_pa(unsigned long);
+extern unsigned long dom_fw_setup(struct domain *, char *, int);
+
+#ifndef MB
+#define MB (1024*1024)
+#endif
+
+/* This is used to determine the portion of a domain's metaphysical memory
+   space reserved for the hypercall patch table. */
+//FIXME: experiment with smaller sizes
+#define HYPERCALL_START 1*MB
+#define HYPERCALL_END 2*MB
+
+#define FW_HYPERCALL_BASE_PADDR HYPERCALL_START
+#define FW_HYPERCALL_END_PADDR HYPERCALL_END
+#define FW_HYPERCALL_PADDR(index) (FW_HYPERCALL_BASE_PADDR + (16UL * index))
+
+/*
+ * PAL can be called in physical or virtual mode simply by
+ * branching to pal_entry_point, which is found in one of the
+ * SAL system table entrypoint descriptors (type=0). Parameters
+ * may be passed in r28-r31 (static) or r32-r35 (stacked); which
+ * convention is used depends on which procedure is being called.
+ * r28 contains the PAL index, the indicator of which PAL procedure
+ * is to be called: Index=0 is reserved, 1-255 indicates static
+ * parameters, 256-511 indicates stacked parameters. 512-1023
+ * are implementation-specific and 1024+ are reserved.
+ * rp=b0 indicates the return point.
+ *
+ * A single hypercall is used for all PAL calls.
+ */
+
+#define FW_HYPERCALL_PAL_CALL_INDEX 0x80UL
+#define FW_HYPERCALL_PAL_CALL_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_PAL_CALL_INDEX)
+#define FW_HYPERCALL_PAL_CALL 0x1000UL
+
+/*
+ * SAL consists of a table of descriptors, one of which (type=0)
+ * contains a sal_entry_point which provides access to a number of
+ * functions. Parameters are passed in r33-r39; r32 contains the
+ * index of the SAL function being called. At entry, r1=gp contains
+ * a global pointer which may be needed by the function. rp=b0
+ * indicates the return point. SAL may not be re-entrant; an
+ * OS must ensure it is called by one processor at a time.
+ *
+ * A single hypercall is used for all SAL calls.
+ */
+
+#define FW_HYPERCALL_SAL_CALL_INDEX 0x81UL
+#define FW_HYPERCALL_SAL_CALL_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_SAL_CALL_INDEX)
+#define FW_HYPERCALL_SAL_CALL 0x1001UL
+
+/*
+ * EFI is accessed via the EFI system table, which contains:
+ * - a header which contains version info
+ * - console information (stdin,stdout,stderr)
+ * as well as pointers to:
+ * - the EFI configuration table, which contains GUID/pointer pairs,
+ * one of which is a pointer to the SAL system table; another is
+ * a pointer to the ACPI table
+ * - the runtime services table, which contains a header followed by
+ * a list of (11) unique "runtime" entry points. EFI runtime entry
+ * points are real function descriptors so contain both a (physical)
+ * address and a global pointer. They are entered (at first) in
+ * physical mode, though it is possible (optionally... requests can
+ * be ignored and calls still must be OK) to call one entry point
+ * which switches the others so they are capable of being called in
+ * virtual mode. Parameters are passed in stacked registers, and
+ * rp=b0 indicates the return point.
+ * - the boot services table, which contains bootloader-related
+ * entry points (ADD MORE HERE LATER)
+ *
+ * Each runtime (and boot) entry point requires a unique hypercall.
+ */
+
+/* these are indexes into the runtime services table */
+#define FW_HYPERCALL_EFI_BASE
+#define FW_HYPERCALL_EFI_GET_TIME_INDEX 0UL
+#define FW_HYPERCALL_EFI_SET_TIME_INDEX 1UL
+#define FW_HYPERCALL_EFI_GET_WAKEUP_TIME_INDEX 2UL
+#define FW_HYPERCALL_EFI_SET_WAKEUP_TIME_INDEX 3UL
+#define FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP_INDEX 4UL
+#define FW_HYPERCALL_EFI_GET_VARIABLE_INDEX 5UL
+#define FW_HYPERCALL_EFI_GET_NEXT_VARIABLE_INDEX 6UL
+#define FW_HYPERCALL_EFI_SET_VARIABLE_INDEX 7UL
+#define FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT_INDEX 8UL
+#define FW_HYPERCALL_EFI_RESET_SYSTEM_INDEX 9UL
+
+/* these are hypercall numbers */
+#define FW_HYPERCALL_EFI_GET_TIME 0x300UL
+#define FW_HYPERCALL_EFI_SET_TIME 0x301UL
+#define FW_HYPERCALL_EFI_GET_WAKEUP_TIME 0x302UL
+#define FW_HYPERCALL_EFI_SET_WAKEUP_TIME 0x303UL
+#define FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP 0x304UL
+#define FW_HYPERCALL_EFI_GET_VARIABLE 0x305UL
+#define FW_HYPERCALL_EFI_GET_NEXT_VARIABLE 0x306UL
+#define FW_HYPERCALL_EFI_SET_VARIABLE 0x307UL
+#define FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT 0x308UL
+#define FW_HYPERCALL_EFI_RESET_SYSTEM 0x309UL
+
+/* these are the physical addresses of the pseudo-entry points that
+ * contain the hypercalls */
+#define FW_HYPERCALL_EFI_GET_TIME_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_EFI_GET_TIME_INDEX)
+#define FW_HYPERCALL_EFI_SET_TIME_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_EFI_SET_TIME_INDEX)
+#define FW_HYPERCALL_EFI_GET_WAKEUP_TIME_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_EFI_GET_WAKEUP_TIME_INDEX)
+#define FW_HYPERCALL_EFI_SET_WAKEUP_TIME_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_EFI_SET_WAKEUP_TIME_INDEX)
+#define FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP_INDEX)
+#define FW_HYPERCALL_EFI_GET_VARIABLE_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_EFI_GET_VARIABLE_INDEX)
+#define FW_HYPERCALL_EFI_GET_NEXT_VARIABLE_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_EFI_GET_NEXT_VARIABLE_INDEX)
+#define FW_HYPERCALL_EFI_SET_VARIABLE_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_EFI_SET_VARIABLE_INDEX)
+#define FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT_INDEX)
+#define FW_HYPERCALL_EFI_RESET_SYSTEM_PADDR FW_HYPERCALL_PADDR(FW_HYPERCALL_EFI_RESET_SYSTEM_INDEX)
--- /dev/null
+/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
+#ifndef __ASM_DOMAIN_H__
+#define __ASM_DOMAIN_H__
+
+#include <linux/thread_info.h>
+
+extern void arch_do_createdomain(struct exec_domain *);
+
+extern int arch_final_setup_guestos(
+ struct exec_domain *, full_execution_context_t *);
+
+extern void domain_relinquish_memory(struct domain *);
+
+struct arch_domain {
+ struct mm_struct *active_mm;
+ struct mm_struct *mm;
+ int metaphysical_rid;
+ int starting_rid; /* first RID assigned to domain */
+ int ending_rid; /* one beyond highest RID assigned to domain */
+ int rid_bits; /* number of virtual rid bits (default: 18) */
+ int breakimm;
+ u64 xen_vastart;
+ u64 xen_vaend;
+ u64 shared_info_va;
+};
+#define metaphysical_rid arch.metaphysical_rid
+#define starting_rid arch.starting_rid
+#define ending_rid arch.ending_rid
+#define rid_bits arch.rid_bits
+#define breakimm arch.breakimm
+#define xen_vastart arch.xen_vastart
+#define xen_vaend arch.xen_vaend
+#define shared_info_va arch.shared_info_va
+
+struct arch_exec_domain {
+ void *regs; /* temporary until find a better way to do privops */
+ struct thread_struct _thread;
+ struct mm_struct *active_mm;
+};
+#define active_mm arch.active_mm
+#define thread arch._thread
+
+// FOLLOWING FROM linux-2.6.7/include/sched.h
+
+struct mm_struct {
+ struct vm_area_struct * mmap; /* list of VMAs */
+#ifndef XEN
+ struct rb_root mm_rb;
+#endif
+ struct vm_area_struct * mmap_cache; /* last find_vma result */
+ unsigned long free_area_cache; /* first hole */
+ pgd_t * pgd;
+ atomic_t mm_users; /* How many users with user space? */
+ atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
+ int map_count; /* number of VMAs */
+#ifndef XEN
+ struct rw_semaphore mmap_sem;
+#endif
+ spinlock_t page_table_lock; /* Protects task page tables and mm->rss */
+
+ struct list_head mmlist; /* List of all active mm's. These are globally strung
+ * together off init_mm.mmlist, and are protected
+ * by mmlist_lock
+ */
+
+ unsigned long start_code, end_code, start_data, end_data;
+ unsigned long start_brk, brk, start_stack;
+ unsigned long arg_start, arg_end, env_start, env_end;
+ unsigned long rss, total_vm, locked_vm;
+ unsigned long def_flags;
+
+ unsigned long saved_auxv[40]; /* for /proc/PID/auxv */
+
+ unsigned dumpable:1;
+#ifdef CONFIG_HUGETLB_PAGE
+ int used_hugetlb;
+#endif
+#ifndef XEN
+ cpumask_t cpu_vm_mask;
+
+ /* Architecture-specific MM context */
+ mm_context_t context;
+
+ /* coredumping support */
+ int core_waiters;
+ struct completion *core_startup_done, core_done;
+
+ /* aio bits */
+ rwlock_t ioctx_list_lock;
+ struct kioctx *ioctx_list;
+
+ struct kioctx default_kioctx;
+#endif
+};
+
+extern struct mm_struct init_mm;
+
+#include <asm/uaccess.h> /* for KERNEL_DS */
+#include <asm/pgtable.h>
+
+#endif /* __ASM_DOMAIN_H__ */
--- /dev/null
+#ifndef _ASM_IA64_INT_H
+#define _ASM_IA64_INT_H
+
+//#include "ia64.h"
+
+#define IA64_VHPT_TRANS_VECTOR 0x0000 /* UNUSED */
+#define IA64_INST_TLB_VECTOR 0x0400
+#define IA64_DATA_TLB_VECTOR 0x0800
+#define IA64_ALT_INST_TLB_VECTOR 0x0c00 /* UNUSED */
+#define IA64_ALT_DATA_TLB_VECTOR 0x1000 /* UNUSED */
+#define IA64_DATA_NESTED_TLB_VECTOR 0x1400
+#define IA64_INST_KEY_MISS_VECTOR 0x1800
+#define IA64_DATA_KEY_MISS_VECTOR 0x1c00
+#define IA64_DIRTY_BIT_VECTOR 0x2000
+#define IA64_INST_ACCESS_BIT_VECTOR 0x2400
+#define IA64_DATA_ACCESS_BIT_VECTOR 0x2800
+#define IA64_BREAK_VECTOR 0x2c00
+#define IA64_EXTINT_VECTOR 0x3000
+#define IA64_PAGE_NOT_PRESENT_VECTOR 0x5000
+#define IA64_KEY_PERMISSION_VECTOR 0x5100
+#define IA64_INST_ACCESS_RIGHTS_VECTOR 0x5200
+#define IA64_DATA_ACCESS_RIGHTS_VECTOR 0x5300
+#define IA64_GENEX_VECTOR 0x5400
+#define IA64_DISABLED_FPREG_VECTOR 0x5500
+#define IA64_NAT_CONSUMPTION_VECTOR 0x5600
+#define IA64_SPECULATION_VECTOR 0x5700 /* UNUSED */
+#define IA64_DEBUG_VECTOR 0x5900
+#define IA64_UNALIGNED_REF_VECTOR 0x5a00
+#define IA64_UNSUPPORTED_DATA_REF_VECTOR 0x5b00
+#define IA64_FP_FAULT_VECTOR 0x5c00
+#define IA64_FP_TRAP_VECTOR 0x5d00
+#define IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR 0x5e00
+#define IA64_TAKEN_BRANCH_TRAP_VECTOR 0x5f00
+#define IA64_SINGLE_STEP_TRAP_VECTOR 0x6000
+
+#define IA64_NO_FAULT 0x0000
+#define IA64_RFI_IN_PROGRESS 0x0001
+#define IA64_RETRY 0x0002
+#define IA64_FORCED_IFA 0x0004
+#define IA64_ILLOP_FAULT (IA64_GENEX_VECTOR | 0x00)
+#define IA64_PRIVOP_FAULT (IA64_GENEX_VECTOR | 0x10)
+#define IA64_PRIVREG_FAULT (IA64_GENEX_VECTOR | 0x20)
+#define IA64_RSVDREG_FAULT (IA64_GENEX_VECTOR | 0x30)
+#define IA64_DISIST_FAULT (IA64_GENEX_VECTOR | 0x40)
+#define IA64_ILLDEP_FAULT (IA64_GENEX_VECTOR | 0x80)
+#define IA64_DTLB_FAULT (IA64_DATA_TLB_VECTOR)
+
+#if !defined(__ASSEMBLY__)
+typedef unsigned long IA64FAULT;
+typedef unsigned long IA64INTVECTOR;
+#endif /* !ASSEMBLY */
+#endif
--- /dev/null
+#ifndef _XEN_ASM_INIT_H
+#define _XEN_ASM_INIT_H
+
+/*
+ * Mark functions and data as being only used at initialization
+ * or exit time.
+ */
+#define __init \
+ __attribute__ ((__section__ (".init.text")))
+#define __exit \
+ __attribute_used__ __attribute__ ((__section__(".text.exit")))
+#define __initdata \
+ __attribute__ ((__section__ (".init.data")))
+#define __exitdata \
+ __attribute_used__ __attribute__ ((__section__ (".data.exit")))
+#define __initsetup \
+ __attribute_used__ __attribute__ ((__section__ (".setup.init")))
+#define __init_call \
+ __attribute_used__ __attribute__ ((__section__ (".initcall.init")))
+#define __exit_call \
+ __attribute_used__ __attribute__ ((__section__ (".exitcall.exit")))
+
+/* For assembly routines
+#define __INIT .section ".text.init","ax"
+#define __FINIT .previous
+#define __INITDATA .section ".data.init","aw"
+*/
+
+#endif /* _XEN_ASM_INIT_H */
--- /dev/null
+#ifndef __ASM_IA64_MM_H__
+#define __ASM_IA64_MM_H__
+
+#include <xen/config.h>
+#ifdef LINUX_2_6
+#include <xen/gfp.h>
+#endif
+#include <xen/list.h>
+#include <xen/spinlock.h>
+#include <xen/perfc.h>
+#include <xen/sched.h>
+
+#include <linux/rbtree.h>
+
+#include <asm/processor.h>
+#include <asm/atomic.h>
+#include <asm/flushtlb.h>
+#include <asm/io.h>
+
+#include <public/xen.h>
+
+/*
+ * The following is for page_alloc.c.
+ */
+
+//void init_page_allocator(unsigned long min, unsigned long max);
+//unsigned long __get_free_pages(int order);
+unsigned long __get_free_pages(unsigned int flags, unsigned int order);
+//void __free_pages(unsigned long p, int order);
+#define get_free_page() (__get_free_pages(GFP_KERNEL,0))
+//#define __get_free_page() (__get_free_pages(0))
+//#define free_pages(_p,_o) (__free_pages(_p,_o))
+#define free_xenheap_page(_p) (__free_pages(_p,0))
+#define free_xenheap_pages(a,b) (__free_pages(a,b))
+#define alloc_xenheap_page() (__get_free_pages(GFP_KERNEL,0))
+
+typedef unsigned long page_flags_t;
+
+#define xmem_cache_t kmem_cache_t
+
+// from linux/include/linux/mm.h
+
+extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
+extern pte_t *FASTCALL(pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
+extern pte_t *FASTCALL(pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
+
+/*
+ * Return the pmd covering 'address', calling __pmd_alloc() only when
+ * the pgd slot is still empty.  On a two-level page table this ends up
+ * trivial, hence the inline -- unlike pte_alloc_map(), which does all
+ * of its work out-of-line.
+ */
+static inline pmd_t *pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+	if (!pgd_none(*pgd))
+		return pmd_offset(pgd, address);
+	return __pmd_alloc(mm, pgd, address);
+}
+
+
+/*
+ * Per-page-frame information.
+ */
+
+//FIXME: This can go away when common/dom0_ops.c is fully arch-independent
+#if 0
+struct pfn_info
+{
+ /* Each frame can be threaded onto a doubly-linked list. */
+ struct list_head list;
+ /* Context-dependent fields follow... */
+ union {
+
+ /* Page is in use by a domain. */
+ struct {
+ /* Owner of this page. */
+ struct domain *domain;
+ /* Reference count and various PGC_xxx flags and fields. */
+ u32 count_info;
+ /* Type reference count and various PGT_xxx flags and fields. */
+ u32 type_info;
+ } inuse;
+
+ /* Page is on a free list. */
+ struct {
+ /* Mask of possibly-tainted TLBs. */
+ unsigned long cpu_mask;
+ /* Must be at same offset as 'u.inuse.count_flags'. */
+ u32 __unavailable;
+ /* Order-size of the free chunk this page is the head of. */
+ u8 order;
+ } free;
+
+ } u;
+
+ /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
+ u32 tlbflush_timestamp;
+};
+#endif
+
+/*
+ * Per-page-frame information.  The leading fields deliberately mirror
+ * the (disabled) struct pfn_info above; the fields after
+ * tlbflush_timestamp exist only so imported Linux code compiles.
+ */
+struct page
+{
+    /* Each frame can be threaded onto a doubly-linked list. */
+    struct list_head list;
+    /* Context-dependent fields follow... */
+    union {
+
+        /* Page is in use by a domain. */
+        struct {
+            /* Owner of this page. */
+            struct domain *domain;
+            /* Reference count and various PGC_xxx flags and fields. */
+            u32 count_info;
+            /* Type reference count and various PGT_xxx flags and fields. */
+            u32 type_info;
+        } inuse;
+
+        /* Page is on a free list. */
+        struct {
+            /* Mask of possibly-tainted TLBs. */
+            unsigned long cpu_mask;
+            /* Must be at same offset as 'u.inuse.count_flags'. */
+            u32 __unavailable;
+            /* Order-size of the free chunk this page is the head of. */
+            u8 order;
+        } free;
+
+    } u;
+    /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
+    u32 tlbflush_timestamp;
+// following added for Linux compiling
+    page_flags_t flags;
+    atomic_t _count;
+    struct list_head lru; // NOTE(review): duplicates "list" above? confirm
+};
+
+/* Initialize the page's reference count to v (stored biased by -1,
+ * following Linux's atomic _count convention).  The argument is
+ * parenthesized so expression arguments (e.g. n + 1) expand safely. */
+#define set_page_count(p,v) 	atomic_set(&(p)->_count, (v) - 1)
+
+//FIXME: These can go away when common/dom0_ops.c is fully arch-independent
+ /* The following page types are MUTUALLY EXCLUSIVE. */
+#define PGT_none (0<<29) /* no special uses of this page */
+#define PGT_l1_page_table (1<<29) /* using this page as an L1 page table? */
+#define PGT_l2_page_table (2<<29) /* using this page as an L2 page table? */
+#define PGT_l3_page_table (3<<29) /* using this page as an L3 page table? */
+#define PGT_l4_page_table (4<<29) /* using this page as an L4 page table? */
+#define PGT_gdt_page (5<<29) /* using this page in a GDT? */
+#define PGT_ldt_page (6<<29) /* using this page in an LDT? */
+#define PGT_writeable_page (7<<29) /* has writable mappings of this page? */
+#define PGT_type_mask (7<<29) /* Bits 29-31. */
+ /* Has this page been validated for use as its current type? */
+#define _PGT_validated 28
+#define PGT_validated (1<<_PGT_validated)
+ /* 28-bit count of uses of this frame as its current type. */
+#define PGT_count_mask ((1<<28)-1)
+
+extern struct pfn_info *frame_table;
+extern unsigned long frame_table_size;
+extern struct list_head free_list;
+extern spinlock_t free_list_lock;
+extern unsigned int free_pfns;
+extern unsigned long max_page;
+void init_frametable(void *frametable_vstart, unsigned long nr_pages);
+void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);
+
+/* Drop a reference to a page frame.  Stubbed out on ia64 for now --
+ * dummy() is presumably a placeholder/trap for unimplemented paths;
+ * confirm before relying on refcounting here. */
+static inline void put_page(struct pfn_info *page)
+{
+    dummy();
+}
+
+
+/*
+ * Take a reference to a page frame on behalf of a domain.  Stubbed
+ * out on ia64 for now; report success (nonzero, as callers of the
+ * x86 counterpart expect) so code checking the result proceeds.
+ * NOTE(review): the original fell off the end without returning a
+ * value, which is undefined behavior when the result is used.
+ */
+static inline int get_page(struct pfn_info *page,
+                           struct domain *domain)
+{
+    dummy();
+    return 1;
+}
+
+// see alloc_new_dom_mem() in common/domain.c
+#define set_machinetophys(_mfn, _pfn) do { } while(0);
+
+// FOLLOWING FROM linux-2.6.7/include/mm.h
+
+/*
+ * This struct defines a virtual memory area (VMA). There is one of
+ * these per VM-area/task. A VM area is any part of the process virtual
+ * memory space that has a special rule for the page-fault handlers
+ * (ie a shared library, the executable area etc).
+ */
+struct vm_area_struct {
+ struct mm_struct * vm_mm; /* The address space we belong to. */
+ unsigned long vm_start; /* Our start address within vm_mm. */
+ unsigned long vm_end; /* The first byte after our end address
+ within vm_mm. */
+
+ /* linked list of VM areas per task, sorted by address */
+ struct vm_area_struct *vm_next;
+
+ pgprot_t vm_page_prot; /* Access permissions of this VMA. */
+ unsigned long vm_flags; /* Flags, listed below. */
+
+#ifndef XEN
+ struct rb_node vm_rb;
+
+// XEN doesn't need all the backing store stuff
+ /*
+ * For areas with an address space and backing store,
+ * linkage into the address_space->i_mmap prio tree, or
+ * linkage to the list of like vmas hanging off its node, or
+ * linkage of vma in the address_space->i_mmap_nonlinear list.
+ */
+ union {
+ struct {
+ struct list_head list;
+ void *parent; /* aligns with prio_tree_node parent */
+ struct vm_area_struct *head;
+ } vm_set;
+
+ struct prio_tree_node prio_tree_node;
+ } shared;
+
+ /*
+ * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
+ * list, after a COW of one of the file pages. A MAP_SHARED vma
+ * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
+ * or brk vma (with NULL file) can only be in an anon_vma list.
+ */
+ struct list_head anon_vma_node; /* Serialized by anon_vma->lock */
+ struct anon_vma *anon_vma; /* Serialized by page_table_lock */
+
+ /* Function pointers to deal with this struct. */
+ struct vm_operations_struct * vm_ops;
+
+ /* Information about our backing store: */
+ unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
+ units, *not* PAGE_CACHE_SIZE */
+ struct file * vm_file; /* File we map to (can be NULL). */
+ void * vm_private_data; /* was vm_pte (shared mem) */
+
+#ifdef CONFIG_NUMA
+ struct mempolicy *vm_policy; /* NUMA policy for the VMA */
+#endif
+#endif
+};
+/*
+ * vm_flags..
+ */
+#define VM_READ 0x00000001 /* currently active flags */
+#define VM_WRITE 0x00000002
+#define VM_EXEC 0x00000004
+#define VM_SHARED 0x00000008
+
+#define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */
+#define VM_MAYWRITE 0x00000020
+#define VM_MAYEXEC 0x00000040
+#define VM_MAYSHARE 0x00000080
+
+#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
+#define VM_GROWSUP 0x00000200
+#define VM_SHM 0x00000400 /* shared memory area, don't swap out */
+#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
+
+#define VM_EXECUTABLE 0x00001000
+#define VM_LOCKED 0x00002000
+#define VM_IO 0x00004000 /* Memory mapped I/O or similar */
+
+ /* Used by sys_madvise() */
+#define VM_SEQ_READ 0x00008000 /* App will access data sequentially */
+#define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */
+
+#define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
+#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
+#define VM_RESERVED 0x00080000 /* Don't unmap it from swap_out */
+#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
+#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
+#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
+
+#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
+#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
+#endif
+
+#ifdef CONFIG_STACK_GROWSUP
+#define VM_STACK_FLAGS (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+#else
+#define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+#endif
+
+/*
+ * The zone field is never updated after free_area_init_core()
+ * sets it, so none of the operations on it need to be atomic.
+ * We'll have up to (MAX_NUMNODES * MAX_NR_ZONES) zones total,
+ * so we use (MAX_NODES_SHIFT + MAX_ZONES_SHIFT) here to get enough bits.
+ */
+#define NODEZONE_SHIFT (sizeof(page_flags_t)*8 - MAX_NODES_SHIFT - MAX_ZONES_SHIFT)
+#define NODEZONE(node, zone) ((node << ZONES_SHIFT) | zone)
+
+static inline unsigned long page_zonenum(struct page *page)
+{
+ return (page->flags >> NODEZONE_SHIFT) & (~(~0UL << ZONES_SHIFT));
+}
+static inline unsigned long page_to_nid(struct page *page)
+{
+ return (page->flags >> (NODEZONE_SHIFT + ZONES_SHIFT));
+}
+
+struct zone;
+extern struct zone *zone_table[];
+
+static inline struct zone *page_zone(struct page *page)
+{
+ return zone_table[page->flags >> NODEZONE_SHIFT];
+}
+
+static inline void set_page_zone(struct page *page, unsigned long nodezone_num)
+{
+ page->flags &= ~(~0UL << NODEZONE_SHIFT);
+ page->flags |= nodezone_num << NODEZONE_SHIFT;
+}
+
+#ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */
+extern unsigned long max_mapnr;
+#endif
+
+static inline void *lowmem_page_address(struct page *page)
+{
+ return __va(page_to_pfn(page) << PAGE_SHIFT);
+}
+
+#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
+#define HASHED_PAGE_VIRTUAL
+#endif
+
+#if defined(WANT_PAGE_VIRTUAL)
+#define page_address(page) ((page)->virtual)
+#define set_page_address(page, address) \
+ do { \
+ (page)->virtual = (address); \
+ } while(0)
+#define page_address_init() do { } while(0)
+#endif
+
+#if defined(HASHED_PAGE_VIRTUAL)
+void *page_address(struct page *page);
+void set_page_address(struct page *page, void *virtual);
+void page_address_init(void);
+#endif
+
+#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
+#define page_address(page) lowmem_page_address(page)
+#define set_page_address(page, address) do { } while(0)
+#define page_address_init() do { } while(0)
+#endif
+
+
+#ifndef CONFIG_DEBUG_PAGEALLOC
+static inline void
+kernel_map_pages(struct page *page, int numpages, int enable)
+{
+}
+#endif
+
+extern unsigned long num_physpages;
+extern unsigned long totalram_pages;
+extern int nr_swap_pages;
+
+#endif /* __ASM_IA64_MM_H__ */
--- /dev/null
+#ifndef __ASM_MMU_CONTEXT_H
+#define __ASM_MMU_CONTEXT_H
+//dummy file to resolve non-arch-indep include
+#ifdef XEN
+#define IA64_REGION_ID_KERNEL 0
+/* Compose a region id from a context number and the vaddr's region
+ * bits (va[63:61]).  Both arguments parenthesized so expression
+ * arguments expand with the intended precedence. */
+#define ia64_rid(ctx,addr)	(((ctx) << 3) | ((addr) >> 61))
+
+#ifndef __ASSEMBLY__
+struct ia64_ctx {
+ spinlock_t lock;
+ unsigned int next; /* next context number to use */
+ unsigned int limit; /* next >= limit => must call wrap_mmu_context() */
+ unsigned int max_ctx; /* max. context value supported by all CPUs */
+};
+
+extern struct ia64_ctx ia64_ctx;
+#endif /* ! __ASSEMBLY__ */
+#endif
+#endif /* ! __ASM_MMU_CONTEXT_H */
--- /dev/null
+#ifndef __ASM_IA64_MULTICALL_H__
+#define __ASM_IA64_MULTICALL_H__
+
+/* Multicalls are not implemented on ia64 yet: executing one is a bug. */
+#define do_multicall_call(_call) BUG()
+#endif /* __ASM_IA64_MULTICALL_H__ */
--- /dev/null
+//dummy file to resolve non-arch-indep include
+#include <asm/asm-offsets.h>
--- /dev/null
+#ifndef _XEN_IA64_PRIVOP_H
+#define _XEN_IA64_PRIVOP_H
+
+#include <asm/ia64_int.h>
+#include <asm/vcpu.h>
+
+typedef unsigned long IA64_INST;
+
+extern IA64FAULT priv_emulate(VCPU *vcpu, REGS *regs, UINT64 isr);
+
+typedef union U_IA64_BUNDLE {
+ unsigned long i64[2];
+ struct { unsigned long template:5,slot0:41,slot1a:18,slot1b:23,slot2:41; };
+ // NOTE: following doesn't work because bitfields can't cross natural
+ // size boundaries
+ //struct { unsigned long template:5, slot0:41, slot1:41, slot2:41; };
+} IA64_BUNDLE;
+
+typedef enum E_IA64_SLOT_TYPE { I, M, F, B, L, ILLEGAL } IA64_SLOT_TYPE;
+
+typedef union U_INST64_A5 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, imm7b:7, r3:2, imm5c:5, imm9d:9, s:1, major:4; };
+} INST64_A5;
+
+typedef union U_INST64_B4 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, btype:3, un3:3, p:1, b2:3, un11:11, x6:6, wh:2, d:1, un1:1, major:4; };
+} INST64_B4;
+
+typedef union U_INST64_B8 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, un21:21, x6:6, un4:4, major:4; };
+} INST64_B8;
+
+typedef union U_INST64_B9 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, imm20:20, :1, x6:6, :3, i:1, major:4; };
+} INST64_B9;
+
+typedef union U_INST64_I19 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, imm20:20, :1, x6:6, x3:3, i:1, major:4; };
+} INST64_I19;
+
+typedef union U_INST64_I26 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4;};
+} INST64_I26;
+
+typedef union U_INST64_I27 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, imm:7, ar3:7, x6:6, x3:3, s:1, major:4;};
+} INST64_I27;
+
+typedef union U_INST64_I28 { // not privileged (mov from AR)
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4;};
+} INST64_I28;
+
+typedef union U_INST64_M28 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :14, r3:7, x6:6, x3:3, :1, major:4;};
+} INST64_M28;
+
+typedef union U_INST64_M29 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4;};
+} INST64_M29;
+
+typedef union U_INST64_M30 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, imm:7, ar3:7,x4:4,x2:2,x3:3,s:1,major:4;};
+} INST64_M30;
+
+typedef union U_INST64_M31 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4;};
+} INST64_M31;
+
+typedef union U_INST64_M32 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, r2:7, cr3:7, x6:6, x3:3, :1, major:4;};
+} INST64_M32;
+
+typedef union U_INST64_M33 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, :7, cr3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M33;
+
+typedef union U_INST64_M35 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
+
+} INST64_M35;
+
+typedef union U_INST64_M36 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, :14, x6:6, x3:3, :1, major:4; };
+} INST64_M36;
+
+typedef union U_INST64_M41 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
+} INST64_M41;
+
+typedef union U_INST64_M42 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M42;
+
+typedef union U_INST64_M43 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, :7, r3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M43;
+
+typedef union U_INST64_M44 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, imm:21, x4:4, i2:2, x3:3, i:1, major:4; };
+} INST64_M44;
+
+typedef union U_INST64_M45 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M45;
+
+typedef union U_INST64_M46 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, un7:7, r3:7, x6:6, x3:3, un1:1, major:4; };
+} INST64_M46;
+
+typedef union U_INST64 {
+ IA64_INST inst;
+ struct { unsigned long :37, major:4; } generic;
+ INST64_A5 A5; // used in build_hypercall_bundle only
+ INST64_B4 B4; // used in build_hypercall_bundle only
+ INST64_B8 B8; // rfi, bsw.[01]
+ INST64_B9 B9; // break.b
+ INST64_I19 I19; // used in build_hypercall_bundle only
+ INST64_I26 I26; // mov register to ar (I unit)
+ INST64_I27 I27; // mov immediate to ar (I unit)
+ INST64_I28 I28; // mov from ar (I unit)
+ INST64_M28 M28; // purge translation cache entry
+ INST64_M29 M29; // mov register to ar (M unit)
+ INST64_M30 M30; // mov immediate to ar (M unit)
+ INST64_M31 M31; // mov from ar (M unit)
+ INST64_M32 M32; // mov reg to cr
+ INST64_M33 M33; // mov from cr
+ INST64_M35 M35; // mov to psr
+ INST64_M36 M36; // mov from psr
+ INST64_M41 M41; // translation cache insert
+ INST64_M42 M42; // mov to indirect reg/translation reg insert
+ INST64_M43 M43; // mov from indirect reg
+ INST64_M44 M44; // set/reset system mask
+ INST64_M45 M45; // translation purge
+ INST64_M46 M46; // translation access (tpa,tak)
+} INST64;
+
+#define MASK_41 ((UINT64)0x1ffffffffff)
+
+extern void privify_memory(void *start, UINT64 len);
+
+#endif
--- /dev/null
+#define XEN_DEFAULT_RID 7
+#define IA64_MIN_IMPL_RID_MSB 17
+#define _REGION_ID(x) ({ia64_rr _v; _v.rrval = (long) (x); _v.rid;})
+#define _REGION_PAGE_SIZE(x) ({ia64_rr _v; _v.rrval = (long) (x); _v.ps;})
+#define _REGION_HW_WALKER(x) ({ia64_rr _v; _v.rrval = (long) (x); _v.ve;})
+#define _MAKE_RR(r, sz, v) ({ia64_rr _v; _v.rrval=0;_v.rid=(r);_v.ps=(sz);_v.ve=(v);_v.rrval;})
+
+/* Bit layout of an ia64 region register; rrval gives the raw 64-bit
+ * value (see also the RR_* accessor macros below). */
+typedef union ia64_rr {
+	struct {
+		unsigned long ve : 1; /* enable hw walker */
+		unsigned long : 1; /* reserved */
+		unsigned long ps : 6; /* log page size */
+		unsigned long rid: 24; /* region id */
+		unsigned long : 32; /* reserved */
+	};
+	unsigned long rrval;
+} ia64_rr;
+
+//
+// region register macros
+//
+#define RR_TO_VE(arg) (((arg) >> 0) & 0x0000000000000001)
+#define RR_VE(arg) (((arg) & 0x0000000000000001) << 0)
+#define RR_VE_MASK 0x0000000000000001L
+#define RR_VE_SHIFT 0
+#define RR_TO_PS(arg) (((arg) >> 2) & 0x000000000000003f)
+#define RR_PS(arg) (((arg) & 0x000000000000003f) << 2)
+#define RR_PS_MASK 0x00000000000000fcL
+#define RR_PS_SHIFT 2
+#define RR_TO_RID(arg) (((arg) >> 8) & 0x0000000000ffffff)
+#define RR_RID(arg) (((arg) & 0x0000000000ffffff) << 8)
+#define RR_RID_MASK 0x00000000ffffff00L
+
--- /dev/null
+#include <asm/ptrace.h>
+#define xen_regs pt_regs
--- /dev/null
+
+#define shadow_lock_init(d) do {} while(0)
+#define shadow_mode_init(d) do {} while(0)
--- /dev/null
+#include <xen/linuxtime.h>
--- /dev/null
+#ifndef XEN_ASM_IA64_TLB_H
+#define XEN_ASM_IA64_TLB_H
+
+#define NITRS 8
+#define NDTRS 8
+
+/* Software copy of one translation register / translation cache
+ * entry: the hardware PTE word (page_flags), the itir word, plus the
+ * virtual address and region id the mapping belongs to. */
+typedef struct {
+	union {
+		struct {
+			unsigned long p : 1; // 0
+			unsigned long : 1; // 1
+			unsigned long ma : 3; // 2-4
+			unsigned long a : 1; // 5
+			unsigned long d : 1; // 6
+			unsigned long pl : 2; // 7-8
+			unsigned long ar : 3; // 9-11
+			unsigned long ppn : 38; // 12-49
+			unsigned long : 2; // 50-51
+			unsigned long ed : 1; // 52
+		};
+		unsigned long page_flags;
+	};
+
+	union {
+		struct {
+			unsigned long : 2; // 0-1
+			unsigned long ps : 6; // 2-7
+			unsigned long key : 24; // 8-31
+			unsigned long : 32; // 32-63
+		};
+		unsigned long itir;
+	};
+
+	unsigned long vadr;
+	unsigned long rid;
+} TR_ENTRY;
+#endif
--- /dev/null
+#ifndef _XEN_IA64_VCPU_H
+#define _XEN_IA64_VCPU_H
+
+// TODO: Many (or perhaps most) of these should eventually be
+// static inline functions
+
+//#include "thread.h"
+#include <asm/ia64_int.h>
+
+typedef unsigned long UINT64;
+typedef unsigned int UINT;
+typedef int BOOLEAN;
+struct exec_domain;
+typedef struct exec_domain VCPU;
+
+// NOTE: The actual VCPU structure (struct virtualcpu) is defined in
+// thread.h. Moving it to here caused a lot of files to change, so
+// for now, we'll leave well enough alone.
+typedef struct pt_regs REGS;
+//#define PSCB(vcpu) (((struct spk_thread_t *)vcpu)->pscb)
+//#define vcpu_regs(vcpu) &((struct spk_thread_t *)vcpu)->thread_regs
+//#define vcpu_thread(vcpu) ((struct spk_thread_t *)vcpu)
+
+/* general registers */
+extern UINT64 vcpu_get_gr(VCPU *vcpu, unsigned reg);
+extern IA64FAULT vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value);
+/* application registers */
+extern IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val);
+/* psr */
+extern BOOLEAN vcpu_get_psr_ic(VCPU *vcpu);
+extern UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr);
+extern IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT64 *pval);
+extern IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm);
+extern IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm);
+extern IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UINT64 val);
+/* control registers */
+extern IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val);
+extern IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val);
+extern IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val);
+extern IA64FAULT vcpu_set_pta(VCPU *vcpu, UINT64 val);
+extern IA64FAULT vcpu_set_ipsr(VCPU *vcpu, UINT64 val);
+extern IA64FAULT vcpu_set_isr(VCPU *vcpu, UINT64 val);
+extern IA64FAULT vcpu_set_iip(VCPU *vcpu, UINT64 val);
+extern IA64FAULT vcpu_set_ifa(VCPU *vcpu, UINT64 val);
+extern IA64FAULT vcpu_set_itir(VCPU *vcpu, UINT64 val);
+extern IA64FAULT vcpu_set_iipa(VCPU *vcpu, UINT64 val);
+extern IA64FAULT vcpu_set_ifs(VCPU *vcpu, UINT64 val);
+extern IA64FAULT vcpu_set_iim(VCPU *vcpu, UINT64 val);
+extern IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT64 val);
+extern IA64FAULT vcpu_set_lid(VCPU *vcpu, UINT64 val);
+extern IA64FAULT vcpu_set_tpr(VCPU *vcpu, UINT64 val);
+extern IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT64 val);
+extern IA64FAULT vcpu_set_lrr0(VCPU *vcpu, UINT64 val);
+extern IA64FAULT vcpu_set_lrr1(VCPU *vcpu, UINT64 val);
+extern IA64FAULT vcpu_get_dcr(VCPU *vcpu, UINT64 *pval);
+extern IA64FAULT vcpu_get_itm(VCPU *vcpu, UINT64 *pval);
+extern IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval);
+extern IA64FAULT vcpu_get_pta(VCPU *vcpu, UINT64 *pval);
+extern IA64FAULT vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval);
+extern IA64FAULT vcpu_get_isr(VCPU *vcpu, UINT64 *pval);
+extern IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT64 *pval);
+extern IA64FAULT vcpu_increment_iip(VCPU *vcpu);
+extern IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval);
+extern IA64FAULT vcpu_get_itir(VCPU *vcpu, UINT64 *pval);
+extern unsigned long vcpu_get_itir_on_fault(VCPU *vcpu, UINT64 ifa);
+extern IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT64 *pval);
+extern IA64FAULT vcpu_get_ifs(VCPU *vcpu, UINT64 *pval);
+extern IA64FAULT vcpu_get_iim(VCPU *vcpu, UINT64 *pval);
+extern IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval);
+extern IA64FAULT vcpu_get_lid(VCPU *vcpu, UINT64 *pval);
+extern IA64FAULT vcpu_get_tpr(VCPU *vcpu, UINT64 *pval);
+extern IA64FAULT vcpu_get_irr0(VCPU *vcpu, UINT64 *pval);
+extern IA64FAULT vcpu_get_irr1(VCPU *vcpu, UINT64 *pval);
+extern IA64FAULT vcpu_get_irr2(VCPU *vcpu, UINT64 *pval);
+extern IA64FAULT vcpu_get_irr3(VCPU *vcpu, UINT64 *pval);
+extern IA64FAULT vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval);
+extern IA64FAULT vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval);
+/* interrupt registers */
+extern IA64FAULT vcpu_get_itv(VCPU *vcpu,UINT64 *pval);
+extern IA64FAULT vcpu_get_pmv(VCPU *vcpu,UINT64 *pval);
+extern IA64FAULT vcpu_get_cmcv(VCPU *vcpu,UINT64 *pval);
+extern IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT64 *pval);
+extern IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val);
+extern IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val);
+extern IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val);
+/* interval timer registers */
+extern IA64FAULT vcpu_set_itm(VCPU *vcpu,UINT64 val);
+extern IA64FAULT vcpu_set_itc(VCPU *vcpu,UINT64 val);
+/* debug breakpoint registers */
+extern IA64FAULT vcpu_set_ibr(VCPU *vcpu,UINT64 reg,UINT64 val);
+extern IA64FAULT vcpu_set_dbr(VCPU *vcpu,UINT64 reg,UINT64 val);
+extern IA64FAULT vcpu_get_ibr(VCPU *vcpu,UINT64 reg,UINT64 *pval);
+extern IA64FAULT vcpu_get_dbr(VCPU *vcpu,UINT64 reg,UINT64 *pval);
+/* performance monitor registers */
+extern IA64FAULT vcpu_set_pmc(VCPU *vcpu,UINT64 reg,UINT64 val);
+extern IA64FAULT vcpu_set_pmd(VCPU *vcpu,UINT64 reg,UINT64 val);
+extern IA64FAULT vcpu_get_pmc(VCPU *vcpu,UINT64 reg,UINT64 *pval);
+extern IA64FAULT vcpu_get_pmd(VCPU *vcpu,UINT64 reg,UINT64 *pval);
+/* banked general registers */
+extern IA64FAULT vcpu_bsw0(VCPU *vcpu);
+extern IA64FAULT vcpu_bsw1(VCPU *vcpu);
+/* region registers */
+extern IA64FAULT vcpu_set_rr(VCPU *vcpu,UINT64 reg,UINT64 val);
+extern IA64FAULT vcpu_get_rr(VCPU *vcpu,UINT64 reg,UINT64 *pval);
+extern IA64FAULT vcpu_get_rr_ve(VCPU *vcpu,UINT64 vadr);
+/* protection key registers */
+extern IA64FAULT vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
+extern IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val);
+extern IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key);
+/* TLB */
+extern IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 padr,
+ UINT64 itir, UINT64 ifa);
+extern IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 padr,
+ UINT64 itir, UINT64 ifa);
+extern IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 padr, UINT64 itir, UINT64 ifa);
+extern IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 padr, UINT64 itir, UINT64 ifa);
+extern IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 addr_range);
+extern IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr);
+extern IA64FAULT vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 addr_range);
+extern IA64FAULT vcpu_ptc_ga(VCPU *vcpu, UINT64 vadr, UINT64 addr_range);
+extern IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr, UINT64 addr_range);
+extern IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr, UINT64 addr_range);
+extern IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
+/* misc */
+extern IA64FAULT vcpu_rfi(VCPU *vcpu);
+extern IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
+
+extern void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector);
+extern void vcpu_pend_timer(VCPU *vcpu);
+extern void vcpu_poke_timer(VCPU *vcpu);
+extern void vcpu_set_next_timer(VCPU *vcpu);
+extern BOOLEAN vcpu_timer_expired(VCPU *vcpu);
+extern UINT64 vcpu_deliverable_interrupts(VCPU *vcpu);
+extern void vcpu_itc_no_srlz(VCPU *vcpu, UINT64, UINT64, UINT64, UINT64);
+
+
+#endif
--- /dev/null
+#ifndef ASM_VHPT_H
+#define ASM_VHPT_H
+
+#define VHPT_ENABLED 1
+#define VHPT_ENABLED_REGION_0_TO_6 1
+#define VHPT_ENABLED_REGION_7 0
+
+
+#if 0
+#define VHPT_CACHE_ENTRY_SIZE 64
+#define VHPT_CACHE_MASK 2097151
+#define VHPT_CACHE_NUM_ENTRIES 32768
+#define VHPT_NUM_ENTRIES 2097152
+#define VHPT_CACHE_ENTRY_SIZE_LOG2 6
+#define VHPT_SIZE_LOG2 26 //????
+#define VHPT_PAGE_SHIFT 26 //????
+#else
+//#define VHPT_CACHE_NUM_ENTRIES 2048
+//#define VHPT_NUM_ENTRIES 131072
+//#define VHPT_CACHE_MASK 131071
+//#define VHPT_SIZE_LOG2 22 //????
+#define VHPT_CACHE_NUM_ENTRIES 8192
+#define VHPT_NUM_ENTRIES 524288
+#define VHPT_CACHE_MASK 524287
+#define VHPT_SIZE_LOG2 24 //????
+#define VHPT_PAGE_SHIFT 24 //????
+#endif
+
+// FIXME: These should be automatically generated
+
+#define VLE_PGFLAGS_OFFSET 0
+#define VLE_ITIR_OFFSET 8
+#define VLE_TITAG_OFFSET 16
+#define VLE_CCHAIN_OFFSET 24
+
+#define VCE_TITAG_OFFSET 0
+#define VCE_CCNEXT_OFFSET 8
+#define VCE_CCPREV_OFFSET 16
+#define VCE_PGFLAGS_OFFSET 24
+#define VCE_ITIR_OFFSET 32
+#define VCE_FNEXT_OFFSET 32
+#define VCE_CCHEAD_OFFSET 40
+#define VCE_VADDR_OFFSET 48
+
+//FIXME: change and declare elsewhere
+#define CAUSE_VHPT_CC_HANDLED 0
+
+#ifndef __ASSEMBLY__
+
+//
+// VHPT collision chain entry (part of the "V-Cache")
+// DO NOT CHANGE THE SIZE OF THIS STRUCTURE (see vhpt.S banked regs calculations)
+//
+typedef struct vcache_entry {
+	union {
+		struct {
+			unsigned long tag : 63;	// 0-62
+			unsigned long ti : 1;	// 63
+		};
+		unsigned long ti_tag;
+	};
+
+	struct vcache_entry *CCNext;	// collision chain next
+	struct vcache_entry *CCPrev;	// collision chain previous
+
+	union {
+		struct {
+			unsigned long p : 1; // 0
+			unsigned long : 1; // 1
+			unsigned long ma : 3; // 2-4
+			unsigned long a : 1; // 5
+			unsigned long d : 1; // 6
+			unsigned long pl : 2; // 7-8
+			unsigned long ar : 3; // 9-11
+			unsigned long ppn : 38; // 12-49
+			unsigned long : 2; // 50-51
+			unsigned long ed : 1; // 52
+
+			unsigned long translation_type : 2; // 53-54 -- hack
+			unsigned long Counter : 9; // 55-63
+		};
+		unsigned long page_flags;
+	};
+
+	union {
+		struct {
+			unsigned long : 2; // 0-1
+			unsigned long ps : 6; // 2-7
+			unsigned long key : 24; // 8-31
+			unsigned long : 32; // 32-63
+		};
+		unsigned long itir;
+
+		//
+		// the free list pointer when entry not in use
+		//
+		struct vcache_entry *FNext;	// free list
+	};
+
+	//
+	// store head of collision chain for removal since thash will only work if
+	// current RID is same as when element was added to chain.
+	//
+	struct vhpt_lf_entry *CCHead;
+
+	unsigned long virtual_address;
+
+	unsigned int CChainCnt;
+	unsigned int Signature;
+} vcache_entry;	// FIX: original "typedef ... };" declared no typedef name
+
+
+//
+// VHPT Long Format Entry (as recognized by hw).  Field offsets must
+// match the VLE_*_OFFSET constants defined above.
+//
+struct vhpt_lf_entry {
+    unsigned long page_flags; // PTE word consumed by the hw walker
+    unsigned long itir; // page size / protection key
+    unsigned long ti_tag; // tag; INVALID_TI_TAG marks entry unused
+    struct vcache_entry *CChain; // software collision chain (V-Cache)
+};
+
+#define INVALID_TI_TAG 0x8000000000000000L
+
+#endif /* !__ASSEMBLY */
+
+#if !VHPT_ENABLED
+#define VHPT_CCHAIN_LOOKUP(Name, i_or_d)
+#else
+#ifdef CONFIG_SMP
+#error "VHPT_CCHAIN_LOOKUP needs a semaphore on the VHPT!"
+#endif
+
+// VHPT_CCHAIN_LOOKUP is intended to run with psr.i+ic off.  It walks
+// the V-Cache collision chain for the faulting address (cr.ifa),
+// comparing ttag values; on a hit it re-inserts the translation via
+// itc and rfi's, bumping the int_counts[CAUSE_VHPT_CC_HANDLED]
+// statistic.  On a miss it restores predicates and falls through to
+// .Out_/.End_ so the normal miss handler runs.
+#define VHPT_CCHAIN_LOOKUP(Name, i_or_d) \
+ \
+Name:; \
+ mov r31 = pr; \
+ mov r16 = cr.ifa; \
+ movl r30 = int_counts; \
+ ;; \
+ thash r28 = r16; \
+ adds r30 = CAUSE_VHPT_CC_HANDLED << 3, r30; \
+ ;; \
+ ttag r19 = r16; \
+ ld8 r27 = [r30]; \
+ adds r17 = VLE_CCHAIN_OFFSET, r28; \
+ ;; \
+ ld8 r17 = [r17]; \
+ ;; \
+ cmp.eq p6,p0 = 0, r17; \
+ mov r21 = r17; \
+ adds r22 = VCE_CCNEXT_OFFSET, r17; \
+ adds r28 = VLE_ITIR_OFFSET, r28; \
+(p6) br .Out_##Name; \
+ ;; \
+ \
+.loop_##Name:; \
+ ld8 r20 = [r21]; \
+ ld8 r18 = [r22]; \
+ adds r23 = VCE_PGFLAGS_OFFSET, r21; \
+ adds r24 = VCE_ITIR_OFFSET, r21; \
+ cmp.eq p6,p0 = r17, r21; \
+ cmp.eq p7,p0 = r0, r0; \
+ ;; \
+ lfetch [r18]; \
+ cmp.eq.andcm p6,p7 = r19, r20; \
+ mov r21 = r18; \
+ adds r22 = VCE_CCNEXT_OFFSET, r18; \
+(p6) br.spnt .Out_##Name; \
+(p7) br.sptk .loop_##Name; \
+ ;; \
+ \
+ ld8 r26 = [r23]; \
+ ld8 r25 = [r24]; \
+ adds r29 = VLE_TITAG_OFFSET - VLE_ITIR_OFFSET, r28; \
+ adds r27 = 1, r27; \
+ ;; \
+ mov cr.itir = r25; \
+ st8 [r28] = r25, VLE_PGFLAGS_OFFSET - VLE_ITIR_OFFSET; \
+ or r26 = 1, r26; \
+ st8 [r30] = r27; \
+ ;; \
+ itc.i_or_d r26; \
+ ;; \
+ srlz.i_or_d; \
+ ;; \
+ st8 [r28] = r26; \
+ mov pr = r31, 0x1ffff; \
+ st8 [r29] = r20; \
+ rfi; \
+ ;; \
+ \
+.Out_##Name:; \
+ mov pr = r31, 0x1ffff; \
+ ;; \
+.End_##Name:;
+
+// br.cond.sptk.few dorfi;
+
+
+
+// VHPT_INSERT: write the translation described by r26 (page flags,
+// with the present bit forced on via "or r26 = 1") and r27 (itir)
+// for the vaddr in r16 straight into the hardware VHPT long-format
+// entry located with thash -- no V-Cache chaining.  Clobbers
+// r17-r21 and modifies r26.
+#define VHPT_INSERT() \
+ {.mmi;\
+ thash r17 = r16;\
+ or r26 = 1, r26;\
+ nop 0;\
+ ;;\
+ };\
+ {.mii;\
+ ttag r21 = r16;\
+ adds r18 = VLE_ITIR_OFFSET, r17;\
+ adds r19 = VLE_PGFLAGS_OFFSET, r17;\
+ ;;\
+ };\
+ {.mmi;\
+\
+ st8[r18] = r27;\
+ adds r20 = VLE_TITAG_OFFSET, r17;\
+ nop 0;\
+ ;;\
+ };\
+ {.mmb;\
+ st8[r19] = r26;\
+ st8[r20] = r21;\
+ nop 0;\
+ ;;\
+ };\
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+// VHPT_INSERT1: insert a translation into the VHPT plus a software
+// collision-chain cache of VCEs (VHPT cache entries).
+// Inputs (by convention in this file -- confirm against call sites):
+//   r16 = start virtual address, r26 = page flags, r27 = itir.
+// r17 = 1 << itir.ps (extr.u of bits 2..7 of r27) gives the mapped
+// size; r30 = r16 + size is the end address, and .MainLoop repeats
+// once per PAGE_SIZE_OFFSET stride until r16 reaches r30.
+// Per iteration: thash/ttag locate the VHPT line; the VCE chain
+// hanging off VLE_CCHAIN_OFFSET is walked in .find_loop looking for a
+// tag match (p10 -> .FillVce).  On a miss, FindOne obtains a VCE:
+// from the G_VCacheRpl free list when non-empty (p3), otherwise by
+// recycling the next slot of the circular G_VHPT_Cache buffer
+// (advancing the cursor at [G_VCacheRpl+8] modulo VHPT_CACHE_MASK,
+// unlinking the victim from its old doubly linked chain, and setting
+// the top byte of the displaced VHPT line's tag to 0x80 -- presumably
+// the ti/invalid bit, verify against the SDM long-format VHPT layout).
+// .AddChain links the VCE at the head of the (possibly empty, p12)
+// chain; .FillVce then writes pgflags/itir/tag into both the VHPT
+// line and the VCE.
+// Clobbers r16-r25, r28-r30 and predicates p1-p15 -- TODO confirm
+// the exact clobber set before relying on it.
+#define VHPT_INSERT1() \
+VCacheInsert:;\
+ mov r18 = 1;\
+ extr.u r17 = r27, 2, 6;\
+ ;;\
+\
+\
+ shl r17 = r18, r17;\
+ ;;\
+\
+\
+ add r30 = r16, r17;\
+ ;;\
+\
+.MainLoop:;\
+ thash r18 = r16;\
+ ;;\
+\
+ ttag r24 = r16;\
+ adds r29 = VLE_CCHAIN_OFFSET, r18;\
+ ;;\
+\
+\
+ ld8 r21 = [r29];\
+ ;;\
+\
+ adds r19 = VCE_CCNEXT_OFFSET, r21;\
+ adds r20 = VCE_TITAG_OFFSET, r21;\
+ mov r28 = r21;\
+\
+ cmp.eq p11, p4 = r0, r21;\
+(p11) br FindOne;\
+ ;;\
+\
+\
+.find_loop:;\
+\
+ ld8 r17 = [r19];\
+ ld8 r18 = [r20];\
+ ;;\
+\
+ adds r19 = VCE_CCNEXT_OFFSET, r17;\
+ adds r20 = VCE_TITAG_OFFSET, r17;\
+ cmp.eq.unc p10, p8 = r18, r24;\
+\
+\
+\
+ cmp.eq.unc p1, p2 = r17, r21;\
+\
+\
+(p10) br .FillVce;\
+ ;;\
+\
+\
+(p8) mov r28 = r17;\
+\
+ lfetch [r19];\
+\
+(p2) br .find_loop;\
+ ;;\
+\
+FindOne:;\
+\
+\
+\
+ movl r22 = G_VCacheRpl;\
+ ;;\
+\
+\
+ ld8 r23 = [r22];\
+ ;;\
+\
+\
+ mov r28 = r23;\
+\
+\
+ adds r17 = VCE_FNEXT_OFFSET, r23;\
+\
+\
+ cmp.eq p14, p3 = r0, r23;\
+ ;;\
+\
+(p3) ld8 r23 = [r17];\
+ ;;\
+\
+\
+(p3) st8 [r22] = r23;\
+(p3) br .AddChain;\
+ ;;\
+\
+\
+\
+\
+ movl r24 = VHPT_CACHE_MASK;\
+\
+\
+ adds r25 = 8, r22;\
+ ;;\
+\
+\
+ ld8 r23 = [r25];\
+ ;;\
+\
+\
+ adds r23 = VHPT_CACHE_ENTRY_SIZE, r23;\
+ ;;\
+\
+\
+ and r23 = r23, r24;\
+\
+\
+ movl r17 = G_VHPT_Cache;\
+ ;;\
+\
+\
+ st8 [r25] = r23;\
+\
+\
+ add r28 = r17, r23;\
+ ;;\
+\
+\
+ adds r22 = VCE_CCHEAD_OFFSET, r28;\
+ ;;\
+\
+ ld8 r17 = [r22], VLE_PGFLAGS_OFFSET - VLE_CCHAIN_OFFSET;\
+\
+ adds r19 = VCE_CCNEXT_OFFSET, r28;\
+ adds r20 = VCE_CCPREV_OFFSET, r28;\
+ ;;\
+\
+ ld8 r20 = [r20];\
+ ld8 r19 = [r19];\
+\
+ adds r21 = VLE_CCHAIN_OFFSET, r17;\
+ ;;\
+\
+ ld8 r18 = [r21];\
+\
+\
+ cmp.eq.unc p9, p7 = r19, r28;\
+\
+\
+ adds r23 = VLE_TITAG_OFFSET + 7, r17;\
+\
+\
+ mov r17 = 0x80;\
+ ;;\
+\
+\
+(p9) st8 [r21] = r0;\
+\
+\
+(p9) st1 [r23] = r17;\
+\
+ adds r24 = VCE_CCPREV_OFFSET, r19;\
+ adds r25 = VCE_CCNEXT_OFFSET, r20;\
+\
+\
+(p7) cmp.eq.unc p13, p6 = r18, r28;\
+ ;;\
+\
+(p7) st8 [r24] = r20;\
+(p7) st8 [r25] = r19;\
+\
+ adds r17 = VCE_PGFLAGS_OFFSET, r28;\
+ ;;\
+\
+(p13) st8 [r21] = r19;\
+(p13) ld8 r18 = [r17], VCE_ITIR_OFFSET - VCE_PGFLAGS_OFFSET;\
+ ;;\
+(p13) st8 [r22] = r18, VLE_ITIR_OFFSET - VLE_PGFLAGS_OFFSET;\
+\
+ ;;\
+(p13) ld8 r18 = [r17], VCE_TITAG_OFFSET - VCE_ITIR_OFFSET;\
+ ;;\
+\
+(p13) st8 [r22] = r18, VLE_TITAG_OFFSET - VLE_ITIR_OFFSET;\
+ ;;\
+\
+.AddChain:;\
+\
+\
+ ld8 r24 = [r29];\
+ ;;\
+\
+\
+ st8 [r29] = r28, 0 - VLE_CCHAIN_OFFSET;\
+\
+ adds r25 = VCE_CCNEXT_OFFSET, r28;\
+ adds r19 = VCE_CCPREV_OFFSET, r28;\
+ adds r20 = VCE_CCHEAD_OFFSET, r28;\
+ ;;\
+\
+\
+ st8 [r20] = r29;\
+\
+ cmp.eq p12, p5 = r0, r24;\
+\
+ adds r23 = VCE_CCPREV_OFFSET, r24;\
+ ;;\
+\
+(p12) st8 [r25] = r28;\
+(p12) st8 [r19] = r28;\
+\
+(p5)ld8 r21 = [r23];\
+ adds r29 = VLE_CCHAIN_OFFSET, r29;\
+ ;;\
+\
+(p5)st8 [r25] = r24;\
+(p5)st8 [r19] = r21;\
+\
+ adds r22 = VCE_CCNEXT_OFFSET, r21;\
+ ;;\
+\
+(p5)st8 [r22] = r28;\
+(p5)st8 [r23] = r28;\
+ ;;\
+\
+.FillVce:;\
+ ttag r24 = r16;\
+\
+\
+ adds r29 = 0 - VLE_CCHAIN_OFFSET, r29;\
+ adds r17 = VCE_PGFLAGS_OFFSET, r28;\
+ movl r19 = PAGE_SIZE_OFFSET;\
+ ;;\
+\
+ st8 [r29] = r26, VLE_ITIR_OFFSET - VLE_PGFLAGS_OFFSET;\
+ st8 [r17] = r26, VCE_ITIR_OFFSET - VCE_PGFLAGS_OFFSET;\
+ add r16 = r16, r19;\
+ ;;\
+\
+ st8 [r29] = r27, VLE_TITAG_OFFSET - VLE_ITIR_OFFSET;\
+ st8 [r17] = r27, VCE_TITAG_OFFSET - VCE_ITIR_OFFSET;\
+ ;;\
+\
+ st8 [r29] = r24;\
+ st8 [r17] = r24;\
+\
+ cmp.lt p15, p0 = r16, r30;\
+(p15) br .MainLoop;\
+ ;;\
+
+
+
+
+#endif /* VHPT_ENABLED */
+#endif
--- /dev/null
+// this file is now obsolete and can be removed
+#include <asm/hpsim_ssc.h>
+
+/*
+ * Emit one character on the console device.  On the HP Ski simulator
+ * the character goes out via the simulator system call interface; on
+ * real hardware it goes through longs_peak_putc().  Always returns 1,
+ * i.e. the character is reported as consumed.
+ */
+static inline int arch_serial_putc(unsigned char c)
+{
+	if (platform_is_hp_ski()) {
+		ia64_ssc(c, 0, 0, 0, SSC_PUTCHAR);
+		return 1;
+	}
+	/* Tested on the HP Longs Peak platform; will probably work on
+	 * other Itanium platforms as well, but undoubtedly needs work. */
+	longs_peak_putc(c);
+	return 1;
+}
+
--- /dev/null
+/******************************************************************************
+ * arch-ia64/hypervisor-if.h
+ *
+ * Guest OS interface to IA64 Xen.
+ */
+
+#ifndef __HYPERVISOR_IF_IA64_H__
+#define __HYPERVISOR_IF_IA64_H__
+
+// "packed" generates awful code
+#define PACKED
+
+/* Pointers are naturally 64 bits in this architecture; no padding needed. */
+#define _MEMORY_PADDING(_X)
+#define MEMORY_PADDING
+
+#ifndef __ASSEMBLY__
+
+/* NB. Both the following are 64 bits each. */
+typedef unsigned long memory_t; /* Full-sized pointer/address/memory-size. */
+typedef unsigned long cpureg_t; /* Full-sized register. */
+
+// NOTE(review): an empty struct is a GCC extension (sizeof == 0) and is
+// not valid ISO C.  Presumably a placeholder until the ia64 context
+// layout is settled -- confirm before compiling with other toolchains.
+typedef struct
+{
+} PACKED execution_context_t;
+
+/*
+ * NB. This may become a 64-bit count with no shift. If this happens then the
+ * structure size will still be 8 bytes, so no other alignments will change.
+ */
+typedef struct {
+    u32  tsc_bits;      /* 0: 32 bits read from the CPU's TSC. */
+    u32  tsc_bitshift;  /* 4: 'tsc_bits' uses N:N+31 of TSC.   */
+} PACKED tsc_timestamp_t; /* 8 bytes */
+
+#include <asm/tlb.h>	/* TR_ENTRY */
+
+// Per-vcpu virtualized CPU state visible to the guest.  The field names
+// mirror the ia64 control registers (cr.ipsr, cr.iip, ...) and
+// application/region registers they shadow -- confirm against vcpu.c.
+typedef struct {
+	// virtualized interruption control registers (names track cr.*)
+	unsigned long ipsr;
+	unsigned long iip;
+	unsigned long ifs;
+	unsigned long precover_ifs;
+	unsigned long isr;
+	unsigned long ifa;
+	unsigned long iipa;
+	unsigned long iim;
+	unsigned long unat;  // not sure if this is needed until NaT arch is done
+	unsigned long tpr;
+	unsigned long iha;
+	unsigned long itir;
+	unsigned long itv;
+	unsigned long pmv;
+	unsigned long cmcv;
+	unsigned long pta;
+	int interrupt_collection_enabled; // virtual psr.ic
+	int interrupt_delivery_enabled; // virtual psr.i
+	int pending_interruption;
+	int incomplete_regframe;	// see SDM vol2 6.8
+	unsigned long delivery_mask[4];	// 4 x 64 bits = one bit per vector?  verify
+	int metaphysical_mode;	// 1 = use metaphys mapping, 0 = use virtual
+	int banknum;	// 0 or 1, which virtual register bank is active
+	unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active
+	unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active
+	unsigned long rrs[8];	// region registers
+	unsigned long krs[8];	// kernel registers
+	unsigned long pkrs[8];	// protection key registers
+	// FIXME: These shouldn't be here as they can be overwritten by guests
+	// and validation at TLB miss time would be too expensive.
+	TR_ENTRY itrs[NITRS];
+	TR_ENTRY dtrs[NDTRS];
+	TR_ENTRY itlb;
+	TR_ENTRY dtlb;
+	// interrupt request / in-service bits: 256 vectors as 4 x 64 bits
+	unsigned long irr[4];
+	unsigned long insvc[4];
+	unsigned long iva;
+	unsigned long dcr;
+	// virtual interval timer state (itc/itm) and timer bookkeeping
+	unsigned long itc;
+	unsigned long domain_itm;
+	unsigned long domain_timer_interval;
+	unsigned long xen_itm;
+	unsigned long xen_timer_interval;
+//} PACKED arch_shared_info_t;
+} arch_vcpu_info_t;		// DON'T PACK 
+
+// NOTE(review): empty struct -- GCC extension, placeholder (see above).
+typedef struct {
+} arch_shared_info_t;		// DON'T PACK 
+
+/*
+ * The following is all CPU context. Note that the i387_ctxt block is filled 
+ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
+ */
+typedef struct {
+    //unsigned long flags;
+} PACKED full_execution_context_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __HYPERVISOR_IF_IA64_H__ */
#ifndef __XEN_KEYHANDLER_H__
#define __XEN_KEYHANDLER_H__
-struct xen_regs;
+#include <asm/regs.h>
/*
* Register a callback function for key @key. The callback occurs in